Dataset schema (column, dtype, min, max):

    code                      string (lengths 81 - 54k)
    code_codestyle            int64  (0 - 721)
    style_context             string (lengths 91 - 41.9k)
    style_context_codestyle   int64  (0 - 699)
    label                     int64  (0 - 1)
import argparse
import collections
import os
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, with the indices of both."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


def camel_case_split(identifier):
    """Split a CamelCased `identifier` into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    """Center `text` in a cell of the given `width` (the check/cross emojis count as width 2)."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_mapping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_mapping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's loop through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe fix it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
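A quick illustration of the two helpers above. The expected outputs in the comments are worked out by hand from the regex and padding logic, not taken from the repo's test suite:

    # camel_case_split breaks an identifier at case boundaries:
    camel_case_split("TFBertForConditionalGeneration")
    # -> ['TF', 'Bert', 'For', 'Conditional', 'Generation']

    # _center_text pads a cell to a fixed width, counting ✅/❌ as width 2:
    _center_text("Bert", 10)  # -> '   Bert   '
    _center_text("✅", 6)     # -> '  ✅  '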
code_codestyle: 656
import datasets
import sacrebleu as scb
from packaging import version
from sacrebleu import TER


_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from predictions and references before scoring.
        Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if
        `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in
        case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       normalized=True,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of "
                "`sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
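The key caveat in the docstring above is the reference layout: this metric takes references grouped per prediction, while sacrebleu's corpus API wants them grouped per reference stream. A minimal sketch of the transposition done in `_compute` (the variable names here are illustrative):

    # references grouped per prediction (this metric's input format) ...
    references = [["ref 1a", "ref 1b"], ["ref 2a", "ref 2b"]]
    # ... transposed into per-stream lists (sacrebleu's corpus format)
    n_refs = len(references[0])
    streams = [[refs[i] for refs in references] for i in range(n_refs)]
    # streams == [["ref 1a", "ref 2a"], ["ref 1b", "ref 2b"]]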
style_context_codestyle: 656
label: 1
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple

import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs

from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    """Records the leaf modules of `module` in the order they are executed."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    """Transfers the weights of `src` into `dest` by matching traced operations one to one."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")


class FakeRegNetVisslWrapper(nn.Module):
    """Fake wrapper exposing a classy-vision RegNet trunk through vissl's feature-block interface."""

    def __init__(self, model: nn.Module):
        super().__init__()

        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))

        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """A dictionary that falls back to creating a timm model when a name is missing."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)

        return val


class NameToOurModelFuncMap(dict):
    """A dictionary returning the correct Hugging Face RegNet class for a checkpoint name."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val


def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict


def convert_weight_and_push(
    name: str,
    from_model_func: Callable[[], nn.Module],
    our_model_func: Callable[[], nn.Module],
    config: RegNetConfig,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

        if from_state_dict is not None:
            keys = []
            # for seer - in1k finetuned we have to manually copy the head
            if "seer" in name and "in1k" in name:
                keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
            to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
            our_model.load_state_dict(to_state_dict)

        our_outputs = our_model(x, output_hidden_states=True)
        our_output = (
            our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
        )
        from_output = from_model(x)
        from_output = from_output[-1] if type(from_output) is list else from_output

        # now since I don't want to use any config files, vissl seer model doesn't actually have a head,
        # so let's just check the last hidden state
        if "seer" in name and "in1k" in name:
            our_output = our_outputs.hidden_states[-1]

        assert torch.allclose(from_output, our_output), "The model logits don't match the original one."

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        size = 224 if "seer" not in name else 384
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "regnet-x-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type="x"),
        "regnet-x-004": ImageNetPreTrainedConfig(depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type="x"),
        "regnet-x-006": ImageNetPreTrainedConfig(depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type="x"),
        "regnet-x-008": ImageNetPreTrainedConfig(depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type="x"),
        "regnet-x-016": ImageNetPreTrainedConfig(depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type="x"),
        "regnet-x-032": ImageNetPreTrainedConfig(depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type="x"),
        "regnet-x-040": ImageNetPreTrainedConfig(depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type="x"),
        "regnet-x-064": ImageNetPreTrainedConfig(depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type="x"),
        "regnet-x-080": ImageNetPreTrainedConfig(depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type="x"),
        "regnet-x-120": ImageNetPreTrainedConfig(depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type="x"),
        "regnet-x-160": ImageNetPreTrainedConfig(depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type="x"),
        "regnet-x-320": ImageNetPreTrainedConfig(depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type="x"),
        # y variant
        "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8),
        "regnet-y-004": ImageNetPreTrainedConfig(depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8),
        "regnet-y-006": ImageNetPreTrainedConfig(depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16),
        "regnet-y-008": ImageNetPreTrainedConfig(depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16),
        "regnet-y-016": ImageNetPreTrainedConfig(depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24),
        "regnet-y-032": ImageNetPreTrainedConfig(depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 1512], groups_width=24),
        "regnet-y-040": ImageNetPreTrainedConfig(depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64),
        "regnet-y-064": ImageNetPreTrainedConfig(depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72),
        "regnet-y-080": ImageNetPreTrainedConfig(depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56),
        "regnet-y-120": ImageNetPreTrainedConfig(depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112),
        "regnet-y-160": ImageNetPreTrainedConfig(depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112),
        "regnet-y-320": ImageNetPreTrainedConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        # models created by SEER -> https://arxiv.org/abs/2202.08360
        "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer": RegNetConfig(depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264),
        "regnet-y-2560-seer": RegNetConfig(depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640),
        "regnet-y-10b-seer": ImageNetPreTrainedConfig(depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010),
        # finetuned on imagenet
        "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232),
        "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328),
        "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264),
        "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640),
        "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010),
    }

    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()

    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
            " currently: regnetx-*, regnety-*. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
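The Tracker/ModuleTransfer pair above works by recording leaf modules in execution order via forward hooks, then zipping the two traces and copying state dicts pairwise. A minimal self-contained sketch of the hook-recording idea (the toy network here is illustrative):

    import torch
    import torch.nn as nn

    traced = []

    def record_leaf(module, inputs, outputs):
        # a "leaf" has no submodules of its own
        if len(list(module.modules())) == 1:
            traced.append(module)

    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3))
    handles = [m.register_forward_hook(record_leaf) for m in net.modules()]
    net(torch.randn(1, 3, 16, 16))
    for h in handles:
        h.remove()
    # traced now lists the leaf modules in the order they ran:
    # [Conv2d, ReLU, Conv2d]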
code_codestyle: 656
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
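The test above exercises near-duplicate detection at a 0.85 similarity threshold. The underlying signal is Jaccard similarity over token sets, which MinHash approximates; a minimal sketch of the exact computation (pure Python, illustrative only, not the `minhash_deduplication` implementation):

    def jaccard(a: str, b: str) -> float:
        sa, sb = set(a.split()), set(b.split())
        return len(sa & sb) / len(sa | sb)

    # "a " * 20 and "a " * 30 tokenize to the same set {"a"}, so they are
    # exact duplicates under this measure:
    jaccard("a " * 20, "a " * 30)  # -> 1.0
    jaccard("a " * 20, "b " * 7)   # -> 0.0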
style_context_codestyle: 656
label: 1
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}


class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
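The last two methods above give RoBERTa's sequence-pair layout, which differs from BERT's by the doubled separator and all-zero token type ids. A quick illustration with the usual RoBERTa special-token ids (the example ids for A and B are made up):

    # single sequence:   <s> A </s>
    # pair of sequences: <s> A </s></s> B </s>
    bos, eos = 0, 2                            # RoBERTa's <s> and </s>
    a, b = [100, 101], [200]
    [bos] + a + [eos]                          # -> [0, 100, 101, 2]
    [bos] + a + [eos] + [eos] + b + [eos]      # -> [0, 100, 101, 2, 2, 200, 2]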
code_codestyle: 656
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
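The pattern above defers importing heavy submodules until an attribute is first touched. A stripped-down sketch of the same idea using the module-level __getattr__ hook from PEP 562 (this is not transformers' _LazyModule implementation; package and module names are illustrative):

    # lazy_pkg/__init__.py
    import importlib

    _submodules = {"heavy": ".heavy"}  # attribute name -> relative module path

    def __getattr__(name):
        if name in _submodules:
            module = importlib.import_module(_submodules[name], __name__)
            globals()[name] = module  # cache so the hook only runs once
            return module
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")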
style_context_codestyle: 656
label: 1
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
code_codestyle: 656
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2 * n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
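A quick sanity check of the formula, worked by hand for the first five values (this check is illustrative and not part of the original script):

    # h(n) = n * (2n - 1):  0*(-1)=0, 1*1=1, 2*3=6, 3*5=15, 4*7=28
    assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]
    # the classical sequence 1, 6, 15, 28, ... starts at index 1,
    # since the list includes h(0) = 0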
style_context_codestyle: 656
label: 1
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
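instantiate_from_config is the small dependency-injection trick used throughout taming-transformers configs: a config node names a class via `target` and its constructor kwargs via `params`. A hedged usage sketch (the target here is deliberately a stdlib class so the snippet runs on its own; a real checkpoint config would point at something like taming.models.vqgan.VQModel):

    config = {
        "target": "collections.OrderedDict",  # any importable dotted path works
        "params": {},
    }
    obj = instantiate_from_config(config)     # imports the class, calls it with **params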
code_codestyle: 656
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
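test_command_parser is written so the same function serves both as a standalone CLI and as a subcommand of a parent parser. A short sketch of wiring it into a parent parser (an illustrative driver, not accelerate's actual entry point):

    parser = argparse.ArgumentParser("accelerate")
    subparsers = parser.add_subparsers()
    test_command_parser(subparsers=subparsers)  # registers the "test" subcommand
    args = parser.parse_args(["test", "--config_file", "my_config.yaml"])
    args.func(args)                             # dispatches to test_command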
style_context_codestyle: 656
label: 1
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
code_codestyle: 656
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets.
Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative
correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic
relationship.

Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally
distributed.

The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a
Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not
entirely reliable but are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score.
        Defaults to `False`.

Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.

Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
    title={CRC standard probability and statistics tables and formulae},
    author={Kokoska, Stephen and Zwillinger, Daniel},
    year={2000},
    publisher={Crc Press}
}
@article{2020SciPy-NMeth,
    author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and
               Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan
               and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
               Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and
               Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde,
               Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris,
               Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
               {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
    title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
    journal = {Nature Methods},
    year    = {2020},
    volume  = {17},
    pages   = {261--272},
    adsurl  = {https://rdcu.be/b08Wh},
    doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
656
1
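As a quick sanity check on the Spearman metric above, the same numbers can be reproduced with scipy directly. A minimal sketch using the values from the docstring's second example:

from scipy.stats import spearmanr

references = [1, 2, 3, 4, 5]
predictions = [10, 9, 2.5, 6, 4]

# scipy returns (correlation, p-value), which the metric wrapper unpacks.
rho, pvalue = spearmanr(references, predictions)
print(rho)               # -0.7
print(round(pvalue, 2))  # 0.19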
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __lowerCamelCase : Dict = pytest.mark.integration __lowerCamelCase : Any = {'''comet'''} __lowerCamelCase : Optional[int] = importlib.util.find_spec('''fairseq''') is not None __lowerCamelCase : int = {'''code_eval'''} __lowerCamelCase : Dict = os.name == '''nt''' __lowerCamelCase : str = {'''bertscore''', '''frugalscore''', '''perplexity'''} __lowerCamelCase : List[str] = importlib.util.find_spec('''transformers''') is not None def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]: """simple docstring""" @wraps(__magic_name__ ) def wrapper(self ,__magic_name__ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("\"test requires Fairseq\"" ) else: test_case(self ,__magic_name__ ) return wrapper def __UpperCAmelCase ( __magic_name__ )-> List[Any]: """simple docstring""" @wraps(__magic_name__ ) def wrapper(self ,__magic_name__ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("\"test requires transformers\"" ) else: test_case(self ,__magic_name__ ) return wrapper def __UpperCAmelCase ( __magic_name__ )-> Optional[int]: """simple docstring""" @wraps(__magic_name__ ) def wrapper(self ,__magic_name__ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("\"test not supported on Windows\"" ) else: test_case(self ,__magic_name__ ) return wrapper def __UpperCAmelCase ( )-> Optional[Any]: """simple docstring""" snake_case_ : Optional[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( a_ , a_ , a_ ) @local class A_ (parameterized.TestCase ): """simple docstring""" a__ = {} a__ = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" ) def _A ( self :List[str] , lowerCAmelCase__ :List[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Dict = "[...]" snake_case_ : Union[str, Any] = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , lowerCAmelCase__ ) ).module_path ) snake_case_ : Optional[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=lowerCAmelCase__ ) # check parameters snake_case_ : int = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(lowerCAmelCase__ , metric_module.__name__ ): with self.use_local_metrics(): try: snake_case_ : Optional[Any] = doctest.testmod(lowerCAmelCase__ , verbose=lowerCAmelCase__ , raise_on_error=lowerCAmelCase__ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[Any] = "[...]" snake_case_ : List[Any] = 
importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , lowerCAmelCase__ ) ).module_path ) # run doctest with self.use_local_metrics(): snake_case_ : Any = doctest.testmod(lowerCAmelCase__ , verbose=lowerCAmelCase__ , raise_on_error=lowerCAmelCase__ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def _A ( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] ) -> Optional[Any]: '''simple docstring''' if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](lowerCAmelCase__ ): yield else: yield @contextmanager def _A ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' def load_local_metric(lowerCAmelCase__ :Optional[Any] , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ): return load_metric(os.path.join("metrics" , lowerCAmelCase__ ) , *lowerCAmelCase__ , **lowerCAmelCase__ ) with patch("datasets.load_metric" ) as mock_load_metric: snake_case_ : List[str] = load_local_metric yield @classmethod def _A ( cls :Optional[Any] , lowerCAmelCase__ :str ) -> str: '''simple docstring''' def wrapper(lowerCAmelCase__ :Any ): snake_case_ : Optional[int] = contextmanager(lowerCAmelCase__ ) snake_case_ : List[Any] = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("bleurt" ) def __UpperCAmelCase ( __magic_name__ )-> List[Any]: """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv" ,"" ,"" ) # handle pytest cli flags class A_ (a_ ): """simple docstring""" def _A ( self :Any , lowerCAmelCase__ :Optional[Any] ) -> str: '''simple docstring''' assert len(input_dict["input_ids"] ) == 2 return np.array([1.0_3, 1.0_4] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor" ) as mock_create_predictor: snake_case_ : Dict = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore" ) def __UpperCAmelCase ( __magic_name__ )-> Tuple: """simple docstring""" import torch def bert_cos_score_idf(__magic_name__ ,__magic_name__ ,*__magic_name__ ,**__magic_name__ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__magic_name__ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model" ), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: snake_case_ : List[Any] = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet" ) def __UpperCAmelCase ( __magic_name__ )-> Optional[int]: """simple docstring""" def load_from_checkpoint(__magic_name__ ): class A_ : """simple docstring""" def _A ( self :Tuple , lowerCAmelCase__ :Tuple , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :str ) -> List[Any]: '''simple docstring''' assert len(lowerCAmelCase__ ) == 2 snake_case_ : Tuple = [0.1_9, 0.9_2] return scores, sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model" ) as mock_download_model: snake_case_ : Union[str, Any] = None with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint: snake_case_ : int = load_from_checkpoint yield def __UpperCAmelCase ( )-> List[Any]: """simple docstring""" 
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
656
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __lowerCamelCase : str = 128022 __lowerCamelCase : List[Any] = 128028 @require_sentencepiece class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MaMaaaTokenizer a__ = False a__ = False a__ = True def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' super().setUp() snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Optional[int] = Path(self.tmpdirname ) save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str: '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]: '''simple docstring''' return ( "This is a test", "This is a test", ) def _A ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = "</s>" snake_case_ : Union[str, Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : Any = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' pass def _A ( self :Optional[int] ) -> int: '''simple docstring''' snake_case_ : int = self.get_tokenizer() snake_case_ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , ) snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , "This is a test" ) @slow def _A ( self :Any ) -> List[Any]: '''simple docstring''' snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch 
@require_sentencepiece @require_tokenizers class A_ (unittest.TestCase ): """simple docstring""" a__ = '''facebook/m2m100_418M''' a__ = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] a__ = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def _A ( cls :str ) -> int: '''simple docstring''' snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) snake_case_ : List[str] = 1 return cls def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 ) def _A ( self :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.tokenizer.get_vocab() self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ ) def _A ( self :Any ) -> Dict: '''simple docstring''' snake_case_ : List[str] = "en" snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2] # fmt: on snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = tempfile.mkdtemp() snake_case_ : int = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(lowerCAmelCase__ ) snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ ) @require_torch def _A ( self :Optional[Any] ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = "en" snake_case_ : Tuple = "fr" snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" ) snake_case_ : Dict = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: snake_case_ : str = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : 
List[str] = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) snake_case_ : int = "zh" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def _A ( self :str ) -> int: '''simple docstring''' snake_case_ : Dict = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) snake_case_ : Tuple = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # en_XX, A, test, EOS "input_ids": [[128_022, 58, 4_183, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 128_006, } , )
656
1
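The local-metric test above ultimately runs the stdlib doctest machinery over each metric module. A minimal sketch of that core step, with my_metric_module as a hypothetical stand-in for the dynamically imported module:

import doctest

import my_metric_module  # hypothetical: the module whose docstrings hold the examples

# raise_on_error surfaces the first failing example instead of summarising at the end.
results = doctest.testmod(my_metric_module, verbose=False, raise_on_error=True)
assert results.failed == 0
assert results.attempted > 1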
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : List[str] = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = ['''FNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = ['''FNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = [ '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FNetForMaskedLM''', '''FNetForMultipleChoice''', '''FNetForNextSentencePrediction''', '''FNetForPreTraining''', '''FNetForQuestionAnswering''', '''FNetForSequenceClassification''', '''FNetForTokenClassification''', '''FNetLayer''', '''FNetModel''', '''FNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys __lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
656
'''simple docstring''' import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __lowerCamelCase : str = '''.''' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) __lowerCamelCase : Tuple = [ '''Assert''', '''AssignVariableOp''', '''EmptyTensorList''', '''MergeV2Checkpoints''', '''ReadVariableOp''', '''ResourceGather''', '''RestoreV2''', '''SaveV2''', '''ShardedFilename''', '''StatefulPartitionedCall''', '''StaticRegexFullMatch''', '''VarHandleOp''', ] def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]: """simple docstring""" snake_case_ : Tuple = SavedModel() snake_case_ : Dict = [] with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f: snake_case_ : Dict = json.load(__magic_name__ )["opsets"] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(__magic_name__ )] ) with open(__magic_name__ ,"rb" ) as f: saved_model.ParseFromString(f.read() ) snake_case_ : Tuple = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want snake_case_ : str = sorted(__magic_name__ ) snake_case_ : Optional[int] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__magic_name__ ) if strict and len(__magic_name__ ) > 0: raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(__magic_name__ ) > 0: print(F'''Found the following incompatible ops for the opset {opset}:''' ) print(*__magic_name__ ,sep="\n" ) else: print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": __lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''') parser.add_argument( '''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.''' ) parser.add_argument( '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.''' ) parser.add_argument( '''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)''' ) __lowerCamelCase : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
656
1
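Stripped of the opset bookkeeping, the op-collection loop in the SavedModel checker above reduces to the sketch below; the checkpoint path is a placeholder, and the protobuf module is assumed to be TensorFlow's saved_model_pb2:

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

saved_model = SavedModel()
with open("path/to/saved_model.pb", "rb") as f:  # placeholder path
    saved_model.ParseFromString(f.read())

model_op_names = set()
for meta_graph in saved_model.meta_graphs:
    # ops used directly in the graph definition
    model_op_names.update(node.op for node in meta_graph.graph_def.node)
    # ops used inside each function of the graph's library
    for func in meta_graph.graph_def.library.function:
        model_op_names.update(node.op for node in func.node_def)
print(sorted(model_op_names))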
'''simple docstring''' import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]: """simple docstring""" snake_case_ : Any = VideoMAEConfig() set_architecture_configs(__magic_name__ ,__magic_name__ ) if "finetuned" not in model_name: snake_case_ : Union[str, Any] = False if "finetuned" in model_name: snake_case_ : str = "huggingface/label-files" if "kinetics" in model_name: snake_case_ : List[str] = 400 snake_case_ : int = "kinetics400-id2label.json" elif "ssv2" in model_name: snake_case_ : Dict = 174 snake_case_ : Union[str, Any] = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." ) snake_case_ : Optional[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) ) snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()} snake_case_ : Optional[int] = idalabel snake_case_ : List[str] = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[Any]: """simple docstring""" if "small" in model_name: snake_case_ : str = 384 snake_case_ : str = 1536 snake_case_ : str = 12 snake_case_ : Optional[int] = 16 snake_case_ : List[Any] = 12 snake_case_ : Optional[int] = 3 snake_case_ : Optional[int] = 192 snake_case_ : Any = 768 elif "large" in model_name: snake_case_ : Any = 1024 snake_case_ : Union[str, Any] = 4096 snake_case_ : Optional[int] = 24 snake_case_ : str = 16 snake_case_ : Tuple = 12 snake_case_ : int = 8 snake_case_ : Optional[int] = 512 snake_case_ : Any = 2048 elif "huge" in model_name: snake_case_ : Tuple = 1280 snake_case_ : Dict = 5120 snake_case_ : int = 32 snake_case_ : Optional[Any] = 16 snake_case_ : List[Any] = 12 snake_case_ : Tuple = 8 snake_case_ : Optional[int] = 640 snake_case_ : List[str] = 2560 elif "base" not in model_name: raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" ) def __UpperCAmelCase ( __magic_name__ )-> Dict: """simple docstring""" if "encoder." in name: snake_case_ : Any = name.replace("encoder." 
,"" ) if "cls_token" in name: snake_case_ : Dict = name.replace("cls_token" ,"videomae.embeddings.cls_token" ) if "decoder_pos_embed" in name: snake_case_ : List[str] = name.replace("decoder_pos_embed" ,"decoder.decoder_pos_embed" ) if "pos_embed" in name and "decoder" not in name: snake_case_ : List[Any] = name.replace("pos_embed" ,"videomae.embeddings.position_embeddings" ) if "patch_embed.proj" in name: snake_case_ : str = name.replace("patch_embed.proj" ,"videomae.embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: snake_case_ : int = name.replace("patch_embed.norm" ,"videomae.embeddings.norm" ) if "decoder.blocks" in name: snake_case_ : Any = name.replace("decoder.blocks" ,"decoder.decoder_layers" ) if "blocks" in name: snake_case_ : Optional[int] = name.replace("blocks" ,"videomae.encoder.layer" ) if "attn.proj" in name: snake_case_ : Union[str, Any] = name.replace("attn.proj" ,"attention.output.dense" ) if "attn" in name and "bias" not in name: snake_case_ : List[Any] = name.replace("attn" ,"attention.self" ) if "attn" in name: snake_case_ : Dict = name.replace("attn" ,"attention.attention" ) if "norm1" in name: snake_case_ : List[Any] = name.replace("norm1" ,"layernorm_before" ) if "norm2" in name: snake_case_ : Union[str, Any] = name.replace("norm2" ,"layernorm_after" ) if "mlp.fc1" in name: snake_case_ : int = name.replace("mlp.fc1" ,"intermediate.dense" ) if "mlp.fc2" in name: snake_case_ : Any = name.replace("mlp.fc2" ,"output.dense" ) if "decoder_embed" in name: snake_case_ : Optional[int] = name.replace("decoder_embed" ,"decoder.decoder_embed" ) if "decoder_norm" in name: snake_case_ : str = name.replace("decoder_norm" ,"decoder.decoder_norm" ) if "decoder_pred" in name: snake_case_ : List[Any] = name.replace("decoder_pred" ,"decoder.decoder_pred" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: snake_case_ : Union[str, Any] = name.replace("norm.weight" ,"videomae.layernorm.weight" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: snake_case_ : str = name.replace("norm.bias" ,"videomae.layernorm.bias" ) if "head" in name and "decoder" not in name: snake_case_ : Any = name.replace("head" ,"classifier" ) return name def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Any: """simple docstring""" for key in orig_state_dict.copy().keys(): snake_case_ : Optional[Any] = orig_state_dict.pop(__magic_name__ ) if key.startswith("encoder." ): snake_case_ : Dict = key.replace("encoder." ,"" ) if "qkv" in key: snake_case_ : List[Any] = key.split("." ) if key.startswith("decoder.blocks" ): snake_case_ : Any = config.decoder_hidden_size snake_case_ : Optional[int] = int(key_split[2] ) snake_case_ : Dict = "decoder.decoder_layers." if "weight" in key: snake_case_ : int = val[:dim, :] snake_case_ : Union[str, Any] = val[dim : dim * 2, :] snake_case_ : Dict = val[-dim:, :] else: snake_case_ : Tuple = config.hidden_size snake_case_ : List[str] = int(key_split[1] ) snake_case_ : str = "videomae.encoder.layer." 
if "weight" in key: snake_case_ : List[str] = val[:dim, :] snake_case_ : List[Any] = val[dim : dim * 2, :] snake_case_ : Any = val[-dim:, :] else: snake_case_ : Any = val return orig_state_dict def __UpperCAmelCase ( )-> int: """simple docstring""" snake_case_ : Union[str, Any] = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" ,filename="eating_spaghetti.npy" ,repo_type="dataset" ) snake_case_ : List[Any] = np.load(__magic_name__ ) return list(__magic_name__ ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> str: """simple docstring""" snake_case_ : Tuple = get_videomae_config(__magic_name__ ) if "finetuned" in model_name: snake_case_ : Optional[int] = VideoMAEForVideoClassification(__magic_name__ ) else: snake_case_ : int = VideoMAEForPreTraining(__magic_name__ ) # download original checkpoint, hosted on Google Drive snake_case_ : Any = "pytorch_model.bin" gdown.cached_download(__magic_name__ ,__magic_name__ ,quiet=__magic_name__ ) snake_case_ : List[Any] = torch.load(__magic_name__ ,map_location="cpu" ) if "model" in files: snake_case_ : Union[str, Any] = files["model"] else: snake_case_ : int = files["module"] snake_case_ : Optional[Any] = convert_state_dict(__magic_name__ ,__magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() # verify model on basic input snake_case_ : Union[str, Any] = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ) snake_case_ : List[str] = prepare_video() snake_case_ : Union[str, Any] = image_processor(__magic_name__ ,return_tensors="pt" ) if "finetuned" not in model_name: snake_case_ : Any = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" ,filename="bool_masked_pos.pt" ) snake_case_ : Dict = torch.load(__magic_name__ ) snake_case_ : Any = model(**__magic_name__ ) snake_case_ : Tuple = outputs.logits snake_case_ : Tuple = [ "videomae-small-finetuned-kinetics", "videomae-small-finetuned-ssv2", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) "videomae-base-short", "videomae-base-short-finetuned-kinetics", "videomae-base", "videomae-base-finetuned-kinetics", "videomae-large", "videomae-large-finetuned-kinetics", "videomae-huge-finetuned-kinetics", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) "videomae-base-short-ssv2", "videomae-base-short-finetuned-ssv2", "videomae-base-ssv2", "videomae-base-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": snake_case_ : Any = torch.Size([1, 400] ) snake_case_ : List[Any] = torch.tensor([-0.9_291, -0.4_061, -0.9_307] ) elif model_name == "videomae-small-finetuned-ssv2": snake_case_ : List[Any] = torch.Size([1, 174] ) snake_case_ : Union[str, Any] = torch.tensor([0.2_671, -0.4_689, -0.8_235] ) elif model_name == "videomae-base": snake_case_ : Dict = torch.Size([1, 1408, 1536] ) snake_case_ : Dict = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] ) elif model_name == "videomae-base-short": snake_case_ : Union[str, Any] = torch.Size([1, 1408, 1536] ) snake_case_ : Optional[int] = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] ) # we verified the loss both for normalized and unnormalized targets for this one snake_case_ : List[str] = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] ) 
elif model_name == "videomae-large": snake_case_ : Any = torch.Size([1, 1408, 1536] ) snake_case_ : List[str] = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] ) elif model_name == "videomae-large-finetuned-kinetics": snake_case_ : List[Any] = torch.Size([1, 400] ) snake_case_ : Optional[int] = torch.tensor([0.0_771, 0.0_011, -0.3_625] ) elif model_name == "videomae-huge-finetuned-kinetics": snake_case_ : List[str] = torch.Size([1, 400] ) snake_case_ : Union[str, Any] = torch.tensor([0.2_433, 0.1_632, -0.4_894] ) elif model_name == "videomae-base-short-finetuned-kinetics": snake_case_ : List[Any] = torch.Size([1, 400] ) snake_case_ : int = torch.tensor([0.6_588, 0.0_990, -0.2_493] ) elif model_name == "videomae-base-finetuned-kinetics": snake_case_ : Dict = torch.Size([1, 400] ) snake_case_ : str = torch.tensor([0.3_669, -0.0_688, -0.2_421] ) elif model_name == "videomae-base-short-ssv2": snake_case_ : Optional[Any] = torch.Size([1, 1408, 1536] ) snake_case_ : Optional[int] = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] ) elif model_name == "videomae-base-short-finetuned-ssv2": snake_case_ : Dict = torch.Size([1, 174] ) snake_case_ : str = torch.tensor([-0.0_537, -0.1_539, -0.3_266] ) elif model_name == "videomae-base-ssv2": snake_case_ : Union[str, Any] = torch.Size([1, 1408, 1536] ) snake_case_ : List[str] = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] ) elif model_name == "videomae-base-finetuned-ssv2": snake_case_ : int = torch.Size([1, 174] ) snake_case_ : Any = torch.tensor([0.1_961, -0.8_337, -0.6_389] ) else: raise ValueError(F'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] ,__magic_name__ ,atol=1E-4 ) else: print("Logits:" ,logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] ,__magic_name__ ,atol=1E-4 ) print("Logits ok!" ) # verify loss, if applicable if model_name == "videomae-base-short": snake_case_ : Tuple = outputs.loss assert torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-4 ) print("Loss ok!" ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__magic_name__ ) model.save_pretrained(__magic_name__ ) if push_to_hub: print("Pushing to the hub..." ) model.push_to_hub(__magic_name__ ,organization="nielsr" ) if __name__ == "__main__": __lowerCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''', type=str, help=( '''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. 
Should be a direct''' ''' download link.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''/Users/nielsrogge/Documents/VideoMAE/Test''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) __lowerCamelCase : Any = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
656
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) __lowerCamelCase : List[str] = ['''names''', '''prefix'''] __lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] __lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines'''] __lowerCamelCase : Optional[Any] = ['''date_format'''] @dataclass class A_ (datasets.BuilderConfig ): """simple docstring""" a__ = "," a__ = None a__ = "infer" a__ = None a__ = None a__ = None a__ = None a__ = None a__ = True a__ = None a__ = None a__ = None a__ = None a__ = False a__ = None a__ = None a__ = None a__ = True a__ = True a__ = False a__ = True a__ = None a__ = "." a__ = None a__ = '"' a__ = 0 a__ = None a__ = None a__ = None a__ = None a__ = True a__ = True a__ = 0 a__ = True a__ = False a__ = None a__ = 10000 a__ = None a__ = "strict" a__ = "error" a__ = None def _A ( self :List[str] ) -> Any: '''simple docstring''' if self.delimiter is not None: snake_case_ : Tuple = self.delimiter if self.column_names is not None: snake_case_ : List[Any] = self.column_names @property def _A ( self :Optional[Any] ) -> int: '''simple docstring''' snake_case_ : Optional[int] = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del 
pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A_ (datasets.ArrowBasedBuilder ): """simple docstring""" a__ = CsvConfig def _A ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]: '''simple docstring''' if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): snake_case_ : int = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : List[str] = [files] snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] snake_case_ : str = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : str = [files] snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table: '''simple docstring''' if self.config.features is not None: snake_case_ : int = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str snake_case_ : str = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' ) raise
656
1
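The generator at the end of the CSV builder above boils down to chunked pandas reads converted to Arrow tables. A minimal sketch, assuming a local data.csv and the builder's default chunk size of 10,000 rows:

import pandas as pd
import pyarrow as pa

# iterator=True + chunksize yields DataFrames batch by batch instead of one big frame.
csv_reader = pd.read_csv("data.csv", iterator=True, chunksize=10_000)  # hypothetical file
for batch_idx, df in enumerate(csv_reader):
    pa_table = pa.Table.from_pandas(df)
    print(batch_idx, pa_table.num_rows)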
'''simple docstring''' from collections import deque class A_ : """simple docstring""" def __init__( self :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> None: '''simple docstring''' snake_case_ : List[Any] = process_name # process name snake_case_ : int = arrival_time # arrival time of the process # completion time of finished process or last interrupted time snake_case_ : Union[str, Any] = arrival_time snake_case_ : Tuple = burst_time # remaining burst time snake_case_ : Dict = 0 # total time of the process wait in ready queue snake_case_ : Any = 0 # time from arrival time to completion time class A_ : """simple docstring""" def __init__( self :int , lowerCAmelCase__ :int , lowerCAmelCase__ :list[int] , lowerCAmelCase__ :deque[Process] , lowerCAmelCase__ :int , ) -> None: '''simple docstring''' snake_case_ : int = number_of_queues # time slice of queues that round robin algorithm applied snake_case_ : Any = time_slices # unfinished process is in this ready_queue snake_case_ : Tuple = queue # current time snake_case_ : Union[str, Any] = current_time # finished process is in this sequence queue snake_case_ : deque[Process] = deque() def _A ( self :Tuple ) -> list[str]: '''simple docstring''' snake_case_ : List[Any] = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def _A ( self :List[str] , lowerCAmelCase__ :list[Process] ) -> list[int]: '''simple docstring''' snake_case_ : Optional[int] = [] for i in range(len(lowerCAmelCase__ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def _A ( self :Optional[int] , lowerCAmelCase__ :list[Process] ) -> list[int]: '''simple docstring''' snake_case_ : str = [] for i in range(len(lowerCAmelCase__ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def _A ( self :str , lowerCAmelCase__ :list[Process] ) -> list[int]: '''simple docstring''' snake_case_ : str = [] for i in range(len(lowerCAmelCase__ ) ): completion_times.append(queue[i].stop_time ) return completion_times def _A ( self :str , lowerCAmelCase__ :deque[Process] ) -> list[int]: '''simple docstring''' return [q.burst_time for q in queue] def _A ( self :Dict , lowerCAmelCase__ :Process ) -> int: '''simple docstring''' process.waiting_time += self.current_time - process.stop_time return process.waiting_time def _A ( self :Tuple , lowerCAmelCase__ :deque[Process] ) -> deque[Process]: '''simple docstring''' snake_case_ : deque[Process] = deque() # sequence deque of finished process while len(lowerCAmelCase__ ) != 0: snake_case_ : Optional[Any] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(lowerCAmelCase__ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 snake_case_ : Tuple = 0 # set the process's turnaround time because it is finished snake_case_ : Union[str, Any] = self.current_time - cp.arrival_time # set the completion time snake_case_ : Tuple = self.current_time # add the process to queue that has finished queue finished.append(lowerCAmelCase__ ) self.finish_queue.extend(lowerCAmelCase__ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def _A ( self :Dict , lowerCAmelCase__ :deque[Process] , lowerCAmelCase__ :int ) 
-> tuple[deque[Process], deque[Process]]: '''simple docstring''' snake_case_ : deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(lowerCAmelCase__ ) ): snake_case_ : Any = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(lowerCAmelCase__ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time snake_case_ : List[Any] = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(lowerCAmelCase__ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished snake_case_ : int = 0 # set the finish time snake_case_ : Union[str, Any] = self.current_time # update the process' turnaround time because it is finished snake_case_ : Tuple = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(lowerCAmelCase__ ) self.finish_queue.extend(lowerCAmelCase__ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def _A ( self :List[Any] ) -> deque[Process]: '''simple docstring''' for i in range(self.number_of_queues - 1 ): snake_case_, snake_case_ : Dict = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest __lowerCamelCase : Any = Process('''P1''', 0, 53) __lowerCamelCase : Optional[Any] = Process('''P2''', 0, 17) __lowerCamelCase : Optional[Any] = Process('''P3''', 0, 68) __lowerCamelCase : List[Any] = Process('''P4''', 0, 24) __lowerCamelCase : List[str] = 3 __lowerCamelCase : Dict = [17, 25] __lowerCamelCase : Optional[Any] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])}) __lowerCamelCase : Any = Process('''P1''', 0, 53) __lowerCamelCase : Optional[Any] = Process('''P2''', 0, 17) __lowerCamelCase : Optional[int] = Process('''P3''', 0, 68) __lowerCamelCase : int = Process('''P4''', 0, 24) __lowerCamelCase : str = 3 __lowerCamelCase : List[Any] = [17, 25] __lowerCamelCase : int = deque([Pa, Pa, Pa, Pa]) __lowerCamelCase : Union[str, Any] = MLFQ(number_of_queues, time_slices, queue, 0) __lowerCamelCase : Optional[int] = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f'''waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print completion times of processes(P1, P2, P3, P4) print( f'''completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print total turnaround times of processes(P1, P2, P3, P4) print( f'''turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}''' ) # print sequence of finished processes print( f'''sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}''' )
656
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MgpstrTokenizer a__ = False a__ = {} a__ = False def _A ( self :List[str] ) -> List[str]: '''simple docstring''' super().setUp() # fmt: off snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str: '''simple docstring''' snake_case_ : Dict = "tester" snake_case_ : Tuple = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def _A ( self :Dict ) -> str: '''simple docstring''' pass def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : Tuple = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 1 ) snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) self.assertTrue(special_token not in decoded ) def _A ( self :int ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertNotEqual(len(lowerCAmelCase__ ) , 0 ) snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def _A ( self :Union[str, Any] ) -> Any: '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def _A ( self :int ) -> Dict: '''simple docstring''' pass
656
1
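To make the time-slice arithmetic in round_robin above concrete, here is a stand-alone sketch of a single round-robin cycle over the same four (name, burst) pairs with a slice of 17; only P2 completes within the cycle:

from collections import deque

time_slice = 17
ready = deque([("P1", 53), ("P2", 17), ("P3", 68), ("P4", 24)])
clock, finished = 0, []
for _ in range(len(ready)):
    name, burst = ready.popleft()
    if burst > time_slice:
        clock += time_slice                       # preempted, goes to the back
        ready.append((name, burst - time_slice))
    else:
        clock += burst                            # runs to completion
        finished.append((name, clock))
print(finished)  # [('P2', 34)]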
'''simple docstring'''

import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class A_ (a_ , unittest.TestCase ):
    """simple docstring"""

    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class A_ (unittest.TestCase ):
    """simple docstring"""

    @property
    def _A ( self :List[Any] ) -> str:
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def _A ( self :int ) -> int:
        '''simple docstring'''
        snake_case_ : str = ort.SessionOptions()
        snake_case_ : List[str] = False
        return options

    def _A ( self :Tuple ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ : List[str] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        snake_case_ : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        snake_case_ : int = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )

        snake_case_ : int = "A red cat sitting on a park bench"

        snake_case_ : Optional[Any] = np.random.RandomState(0 )
        snake_case_ : Dict = pipe(
            prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCAmelCase__ , output_type="np" , )
        snake_case_ : Optional[Any] = output.images
        snake_case_ : Any = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        snake_case_ : Dict = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def _A ( self :Optional[int] ) -> List[str]:
        '''simple docstring'''
        snake_case_ : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        snake_case_ : List[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        snake_case_ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
        snake_case_ : Dict = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )

        snake_case_ : Union[str, Any] = "A red cat sitting on a park bench"

        snake_case_ : int = np.random.RandomState(0 )
        snake_case_ : int = pipe(
            prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCAmelCase__ , output_type="np" , )
        snake_case_ : List[Any] = output.images
        snake_case_ : Dict = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        snake_case_ : Dict = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
656
'''simple docstring'''

from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
    """simple docstring"""
    return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__magic_name__ ,__magic_name__ ) ) )


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[list[float] | float]]:
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        snake_case_ : int = (
            "Wrong input data's dimensions... "
            F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(__magic_name__ )

    try:
        if dataset.shape[1] != value_array.shape[1]:
            snake_case_ : Dict = (
                "Wrong input data's shape... "
                F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(__magic_name__ )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )

    if dataset.dtype != value_array.dtype:
        snake_case_ : Dict = (
            "Input data have different datatype... "
            F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(__magic_name__ )

    snake_case_ : Optional[int] = []

    for value in value_array:
        snake_case_ : List[str] = euclidean(__magic_name__ ,dataset[0] )
        snake_case_ : int = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            snake_case_ : Optional[Any] = euclidean(__magic_name__ ,__magic_name__ )

            if dist > temp_dist:
                snake_case_ : Tuple = temp_dist
                snake_case_ : Optional[int] = dataset_value.tolist()

        answer.append([vector, dist] )

    return answer


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float:
    """simple docstring"""
    return np.dot(__magic_name__ ,__magic_name__ ) / (norm(__magic_name__ ) * norm(__magic_name__ ))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
656
1
'''simple docstring'''

from sklearn.metrics import mean_squared_error

import datasets


__lowerCamelCase : Tuple = '''\
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
'''

__lowerCamelCase : Tuple = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''

__lowerCamelCase : str = '''
Args:
    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.
    references: array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    sample_weight: array-like of shape (n_samples,), default=None
        Sample weights.
    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
        "raw_values" : Returns a full set of errors in case of multioutput input.
        "uniform_average" : Errors of all outputs are averaged with uniform weight.
    squared : bool, default=True
        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.

Returns:
    mse : mean squared error.

Examples:

    >>> mse_metric = datasets.load_metric("mse")
    >>> predictions = [2.5, 0.0, 2, 8]
    >>> references = [3, -0.5, 2, 7]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'mse\': 0.375}
    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
    >>> print(rmse_result)
    {\'mse\': 0.6123724356957945}

    If you\'re using multi-dimensional lists, then set the config as follows :

    >>> mse_metric = datasets.load_metric("mse", "multilist")
    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
    >>> references = [[0, 2], [-1, 2], [8, -5]]
    >>> results = mse_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'mse\': 0.7083333333333334}
    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {\'mse\': array([0.41666667, 1. ])}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
    """simple docstring"""

    def _A ( self :Union[str, Any] ) -> Optional[Any]:
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ] , )

    def _A ( self :int ) -> Any:
        '''simple docstring'''
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float" ) ),
                "references": datasets.Sequence(datasets.Value("float" ) ),
            }
        else:
            return {
                "predictions": datasets.Value("float" ),
                "references": datasets.Value("float" ),
            }

    def _A ( self :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :str="uniform_average" , lowerCAmelCase__ :Union[str, Any]=True ) -> Tuple:
        '''simple docstring'''
        snake_case_ : Any = mean_squared_error(
            lowerCAmelCase__ , lowerCAmelCase__ , sample_weight=lowerCAmelCase__ , multioutput=lowerCAmelCase__ , squared=lowerCAmelCase__ )

        return {"mse": mse}
656
'''simple docstring'''

import fire

from utils import calculate_rouge, save_json


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=None ,**__magic_name__ )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : int = [x.strip() for x in open(__magic_name__ ).readlines()]
    snake_case_ : Optional[int] = [x.strip() for x in open(__magic_name__ ).readlines()][: len(__magic_name__ )]
    snake_case_ : List[Any] = calculate_rouge(__magic_name__ ,__magic_name__ ,**__magic_name__ )
    if save_path is not None:
        save_json(__magic_name__ ,__magic_name__ ,indent=__magic_name__ )
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
656
1
'''simple docstring'''

__lowerCamelCase : int = {str(digit): digit**5 for digit in range(10)}


def __UpperCAmelCase ( __magic_name__ )-> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__magic_name__ ) )


def __UpperCAmelCase ( )-> int:
    """simple docstring"""
    return sum(
        number
        for number in range(1000 ,100_0000 )
        if number == digits_fifth_powers_sum(__magic_name__ )
    )


if __name__ == "__main__":
    print(solution())
656
'''simple docstring'''

import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    ConditionalDetrConfig,
    ConditionalDetrForObjectDetection,
    ConditionalDetrForSegmentation,
    ConditionalDetrImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Optional[Any] = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append(
        (
            f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
            f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
        )
    )
    rename_keys.append(
        (
            f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
            f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
        )
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ('''input_proj.weight''', '''input_projection.weight'''),
        ('''input_proj.bias''', '''input_projection.bias'''),
        ('''query_embed.weight''', '''query_position_embeddings.weight'''),
        ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
        ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
        ('''class_embed.weight''', '''class_labels_classifier.weight'''),
        ('''class_embed.bias''', '''class_labels_classifier.bias'''),
        ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
        ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
        ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
        ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
        ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
        ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
        ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
        ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
        ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
        ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
        ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
        ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
        ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
        ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
        ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
        ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
    ]
)


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
    """simple docstring"""
    snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ )
    snake_case_ : Any = val


def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : Any = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
            snake_case_ : int = value
        else:
            snake_case_ : int = value

    return new_state_dict


def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]:
    """simple docstring"""
    snake_case_ : str = ""
    if is_panoptic:
        snake_case_ : Dict = "conditional_detr."

    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        snake_case_ : Tuple = in_proj_weight[:256, :]
        snake_case_ : List[Any] = in_proj_bias[:256]
        snake_case_ : Optional[Any] = in_proj_weight[256:512, :]
        snake_case_ : Optional[int] = in_proj_bias[256:512]
        snake_case_ : Optional[int] = in_proj_weight[-256:, :]
        snake_case_ : str = in_proj_bias[-256:]


def __UpperCAmelCase ( )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )

    return im


@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
    """simple docstring"""
    snake_case_ : Optional[Any] = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        snake_case_ : Optional[Any] = "resnet101"
    if "dc5" in model_name:
        snake_case_ : List[str] = True
    snake_case_ : Tuple = "panoptic" in model_name
    if is_panoptic:
        snake_case_ : List[Any] = 250
    else:
        snake_case_ : Optional[Any] = 91
        snake_case_ : Optional[int] = "huggingface/label-files"
        snake_case_ : Dict = "coco-detection-id2label.json"
        snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
        snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()}
        snake_case_ : int = idalabel
        snake_case_ : Dict = {v: k for k, v in idalabel.items()}

    # load image processor
    snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
    snake_case_ : str = ConditionalDetrImageProcessor(format=__magic_name__ )

    # prepare image
    snake_case_ : str = prepare_img()
    snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" )
    snake_case_ : Union[str, Any] = encoding["pixel_values"]

    logger.info(F'''Converting model {model_name}...''' )

    # load original model from torch hub
    snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,__magic_name__ ,pretrained=__magic_name__ ).eval()
    snake_case_ : Any = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            snake_case_ : Any = "conditional_detr." + src
        rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
    snake_case_ : Tuple = rename_backbone_keys(__magic_name__ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(__magic_name__ ,is_panoptic=__magic_name__ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    snake_case_ : int = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                snake_case_ : Any = state_dict.pop(__magic_name__ )
                snake_case_ : Optional[int] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                snake_case_ : Tuple = state_dict.pop(__magic_name__ )
                snake_case_ : Any = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                snake_case_ : Union[str, Any] = state_dict.pop(__magic_name__ )
                snake_case_ : List[Any] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                snake_case_ : Any = state_dict.pop(__magic_name__ )
                snake_case_ : List[Any] = val
    # finally, create HuggingFace model and load state dict
    snake_case_ : Optional[int] = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
    model.load_state_dict(__magic_name__ )
    model.eval()
    model.push_to_hub(repo_id=__magic_name__ ,organization="DepuMeng" ,commit_message="Add model" )
    # verify our conversion
    snake_case_ : Dict = conditional_detr(__magic_name__ )
    snake_case_ : Union[str, Any] = model(__magic_name__ )
    assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )

    # Save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
    model.save_pretrained(__magic_name__ )
    image_processor.save_pretrained(__magic_name__ )


if __name__ == "__main__":
    __lowerCamelCase : Tuple = argparse.ArgumentParser()

    parser.add_argument(
        '''--model_name''',
        default='''conditional_detr_resnet50''',
        type=str,
        help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    __lowerCamelCase : int = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
656
1
'''simple docstring'''

from __future__ import annotations


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> bool:
    """simple docstring"""
    snake_case_ : Union[str, Any] = get_failure_array(__magic_name__ )

    # 2) Step through text searching for pattern
    snake_case_, snake_case_ : List[Any] = 0, 0  # index into text, pattern
    while i < len(__magic_name__ ):
        if pattern[j] == text[i]:
            if j == (len(__magic_name__ ) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            snake_case_ : List[Any] = failure[j - 1]
            continue
        i += 1
    return False


def __UpperCAmelCase ( __magic_name__ )-> list[int]:
    """simple docstring"""
    snake_case_ : List[str] = [0]
    snake_case_ : List[str] = 0
    snake_case_ : Any = 1
    while j < len(__magic_name__ ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            snake_case_ : Optional[int] = failure[i - 1]
            continue
        j += 1
        failure.append(__magic_name__ )
    return failure


if __name__ == "__main__":
    # Test 1)
    __lowerCamelCase : Tuple = '''abc1abc12'''
    __lowerCamelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    __lowerCamelCase : Optional[Any] = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, texta) and not kmp(pattern, texta)

    # Test 2)
    __lowerCamelCase : int = '''ABABX'''
    __lowerCamelCase : Dict = '''ABABZABABYABABX'''
    assert kmp(pattern, text)

    # Test 3)
    __lowerCamelCase : Tuple = '''AAAB'''
    __lowerCamelCase : Dict = '''ABAAAAAB'''
    assert kmp(pattern, text)

    # Test 4)
    __lowerCamelCase : int = '''abcdabcy'''
    __lowerCamelCase : Tuple = '''abcxabcdabxabcdabcdabcy'''
    assert kmp(pattern, text)

    # Test 5)
    __lowerCamelCase : Union[str, Any] = '''aabaabaaa'''
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
656
'''simple docstring'''

import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImgaImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class A_ (unittest.TestCase ):
    """simple docstring"""

    def _A ( self :Any ) -> str:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _A ( self :List[Any] ) -> List[str]:
        '''simple docstring'''
        snake_case_ : Any = 1
        snake_case_ : Dict = 3
        snake_case_ : Union[str, Any] = (32, 32)

        snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
        return image

    @property
    def _A ( self :Optional[int] ) -> Any:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case_ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        return model

    @property
    def _A ( self :Dict ) -> Any:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case_ : Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def _A ( self :Dict ) -> Optional[int]:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case_ : str = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
        return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )

    @property
    def _A ( self :Any ) -> str:
        '''simple docstring'''

        def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
            class A_ :
                """simple docstring"""

                def __init__( self :Optional[int] ) -> List[str]:
                    '''simple docstring'''
                    snake_case_ : str = torch.ones([0] )

                def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
                    '''simple docstring'''
                    self.pixel_values.to(lowerCAmelCase__ )
                    return self

            return Out()

        return extract

    def _A ( self :int ) -> Dict:
        '''simple docstring'''
        snake_case_ : str = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : str = self.dummy_cond_unet
        snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
        snake_case_ : Dict = self.dummy_vae
        snake_case_ : Dict = self.dummy_text_encoder
        snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        snake_case_ : str = 77

        snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
        snake_case_ : Tuple = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
            unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
        snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
        alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )

        snake_case_ : Dict = "A painting of a squirrel eating a burger"
        snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
        snake_case_ : Dict = alt_pipe(
            [prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )

        snake_case_ : Any = output.images

        snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
        snake_case_ : Optional[Any] = alt_pipe(
            [prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]

        snake_case_ : Tuple = image[0, -3:, -3:, -1]
        snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _A ( self :int ) -> List[str]:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = self.dummy_cond_unet
        snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
        snake_case_ : int = self.dummy_vae
        snake_case_ : List[Any] = self.dummy_text_encoder
        snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        snake_case_ : int = 77

        snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )

        # put models in fp16
        snake_case_ : Optional[Any] = unet.half()
        snake_case_ : Tuple = vae.half()
        snake_case_ : List[str] = bert.half()

        # make sure here that pndm scheduler skips prk
        snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
            unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
        snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
        alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )

        snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
        snake_case_ : str = torch.manual_seed(0 )
        snake_case_ : Any = alt_pipe(
            [prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _A ( self :Optional[int] ) -> Any:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        snake_case_ : str = init_image.resize((760, 504) )

        snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
        snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
            lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()

        snake_case_ : Tuple = "A fantasy landscape, trending on artstation"

        snake_case_ : int = torch.manual_seed(0 )
        snake_case_ : List[str] = pipe(
            prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
        snake_case_ : str = output.images[0]

        snake_case_ : List[Any] = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
    """simple docstring"""

    def _A ( self :Optional[Any] ) -> Optional[int]:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _A ( self :str ) -> Any:
        '''simple docstring'''
        snake_case_ : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        snake_case_ : List[Any] = init_image.resize((768, 512) )
        snake_case_ : Tuple = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )

        snake_case_ : Any = "BAAI/AltDiffusion"
        snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
            lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()

        snake_case_ : Tuple = "A fantasy landscape, trending on artstation"

        snake_case_ : Tuple = torch.manual_seed(0 )
        snake_case_ : List[Any] = pipe(
            prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
        snake_case_ : Optional[int] = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
656
1
'''simple docstring'''

import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


__lowerCamelCase : List[str] = logging.get_logger(__name__)


class A_ (a_ ):
    """simple docstring"""

    def __init__( self :List[str] , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :Optional[Any] ) -> None:
        '''simple docstring'''
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , lowerCAmelCase__ , )
        super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
656
'''simple docstring'''

import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    Pipeline,
    ZeroShotClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}


@is_pipeline_test
class A_ (unittest.TestCase ):
    """simple docstring"""

    a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        a__ = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any:
        '''simple docstring'''
        snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
            model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["polics", "health"] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
        self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )

        # No kwarg
        snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
        self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )

        snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
        self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )

        snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
        self.assertEqual(
            lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )

        snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
        self.assertEqual(
            lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )

        snake_case_ : str = classifier(
            "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
        self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )

        # https://github.com/huggingface/transformers/issues/13846
        snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] )
        self.assertEqual(
            lowerCAmelCase__ , [
                {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
                for i in range(1 )
            ] , )
        snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
        self.assertEqual(
            lowerCAmelCase__ , [
                {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
                for i in range(2 )
            ] , )

        with self.assertRaises(lowerCAmelCase__ ):
            classifier("" , candidate_labels="politics" )

        with self.assertRaises(lowerCAmelCase__ ):
            classifier(lowerCAmelCase__ , candidate_labels="politics" )

        with self.assertRaises(lowerCAmelCase__ ):
            classifier("Who are you voting for in 2020?" , candidate_labels="" )

        with self.assertRaises(lowerCAmelCase__ ):
            classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )

        with self.assertRaises(lowerCAmelCase__ ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )

        with self.assertRaises(lowerCAmelCase__ ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )

        self.run_entailment_id(lowerCAmelCase__ )

    def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case_ : int = zero_shot_classifier.model.config
        snake_case_ : Optional[int] = config.labelaid
        snake_case_ : Tuple = zero_shot_classifier.entailment_id

        snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )

        snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )

        snake_case_ : List[str] = original_labelaid
        self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )

    @require_torch
    def _A ( self :Tuple ) -> Any:
        '''simple docstring'''
        snake_case_ : List[Any] = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )

    @require_torch
    def _A ( self :Optional[Any] ) -> Tuple:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        snake_case_ : int = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )

        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    @require_tf
    def _A ( self :Union[str, Any] ) -> Dict:
        '''simple docstring'''
        snake_case_ : List[str] = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
        snake_case_ : Optional[int] = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )

        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    @slow
    @require_torch
    def _A ( self :Union[str, Any] ) -> int:
        '''simple docstring'''
        snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        snake_case_ : str = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )

        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )
        snake_case_ : Optional[int] = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) , {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )

    @slow
    @require_tf
    def _A ( self :List[str] ) -> str:
        '''simple docstring'''
        snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
        snake_case_ : Optional[Any] = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )

        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )

        snake_case_ : Tuple = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) , {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )
656
1
'''simple docstring'''

from scipy.stats import spearmanr

import datasets


__lowerCamelCase : str = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''

__lowerCamelCase : int = '''
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned
        if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {\'spearmanr\': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results[\'spearmanr\'])
        -0.7
        >>> print(round(results[\'spearmanr_pvalue\'], 2))
        0.19
'''

__lowerCamelCase : List[str] = R'''\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
            Haberland, Matt and Reddy, Tyler and Cournapeau, David and
            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
            Kern, Robert and Larson, Eric and Carey, C J and
            Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
            Harris, Charles R. and Archibald, Anne M. and
            Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
            Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
    """simple docstring"""

    def _A ( self :str ) -> Union[str, Any]:
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("float" ),
                    "references": datasets.Value("float" ),
                } ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )

    def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]:
        '''simple docstring'''
        snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
656
'''simple docstring'''

import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
    raise Exception('''requires fairseq >= 1.0.0a''')

logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
    roberta.eval()  # disable dropout
    snake_case_ : Dict = roberta.model.encoder.sentence_encoder
    snake_case_ : List[str] = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
    if classification_head:
        snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:" ,__magic_name__ )

    snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
    snake_case_ : int = roberta_sent_encoder.embed_positions.weight
    snake_case_ : Union[str, Any] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
    snake_case_ : str = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        snake_case_ : BertLayer = model.roberta.encoder.layer[i]
        snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        snake_case_ : RobertaAttention = layer.attention
        snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
        snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias

        # self attention
        snake_case_ : BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )

        snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
        snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
        snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
        snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
        snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
        snake_case_ : Any = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        snake_case_ : BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
        snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        snake_case_ : int = roberta_layer.final_layer_norm.weight
        snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias

        # intermediate
        snake_case_ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        snake_case_ : List[str] = roberta_layer.fca.weight
        snake_case_ : List[Any] = roberta_layer.fca.bias

        # output
        snake_case_ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        snake_case_ : Any = roberta_layer.fca.weight
        snake_case_ : Any = roberta_layer.fca.bias
        # end of layer

    if classification_head:
        snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
        snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
        snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
        snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
        snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
        snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
        snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
        snake_case_ : int = roberta.model.encoder.lm_head.weight
        snake_case_ : List[str] = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 )  # batch of size 1

    snake_case_ : Union[str, Any] = model(__magic_name__ )[0]
    if classification_head:
        snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) )
    else:
        snake_case_ : List[str] = roberta.model(__magic_name__ )[0]
    print(our_output.shape ,their_output.shape )
    snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
    print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )

    pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__magic_name__ )


if __name__ == "__main__":
    __lowerCamelCase : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    __lowerCamelCase : Tuple = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
'''simple docstring'''


def validate_initial_digits(credit_card_number: str) -> bool:
    """Check the leading digits against the issuer prefixes accepted here."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over the card number."""
    cc_number = credit_card_number
    total = 0
    for i in range(len(cc_number) - 2, -1, -2):
        # Double the value of every second digit, from right to left.
        digit = int(cc_number[i])
        digit *= 2
        # If doubling yields a two-digit number, i.e. greater than 9
        # (e.g., 6 x 2 = 12), add the digits of the product
        # (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6) to get a single digit.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits.
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
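A quick arithmetic sanity check of the doubling rule above, verifiable by hand:

# "59": 5 doubles to 10 -> 1 + 0 = 1; 1 + 9 = 10; 10 % 10 == 0, so it passes.
assert luhn_validation("59")
# "58": 1 + 8 = 9, not divisible by 10, so it fails.
assert not luhn_validation("58")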
'''simple docstring''' import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]: """simple docstring""" snake_case_ : str = False snake_case_ : int = False if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ): snake_case_ : Any = True elif "IPython" in sys.modules: snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() ) try: snake_case_ : Any = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' ) if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " "your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if num_processes is None: snake_case_ : Tuple = 8 snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" ) print(F'''Launching a training on {num_processes} TPU cores.''' ) xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on one CPU." ) function(*__magic_name__ ) else: if num_processes is None: raise ValueError( "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " "inside your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if torch.cuda.is_initialized(): raise ValueError( "To launch a multi-GPU training from your notebook, you need to avoid running any instruction " "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA " "function." ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__magic_name__ ,master_addr="127.0.01" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ): snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" ) print(F'''Launching training on {num_processes} GPUs.''' ) try: start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. 
" "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic." ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): snake_case_ : Any = "1" print("Launching training on MPS." ) elif torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on CPU." ) function(*__magic_name__ ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict: """simple docstring""" from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__magic_name__ ,master_addr="127.0.01" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,): snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ ) start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
'''simple docstring''' import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A_ : """simple docstring""" def __init__( self :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Union[str, Any]=13 , lowerCAmelCase__ :int=[30, 30] , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :Tuple=3 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :str=32 , lowerCAmelCase__ :Tuple=5 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str=37 , lowerCAmelCase__ :List[str]="gelu" , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :str=3 , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=8 , lowerCAmelCase__ :Optional[Any]=10 , ) -> int: '''simple docstring''' snake_case_ : str = parent snake_case_ : Optional[int] = batch_size snake_case_ : Union[str, Any] = image_size snake_case_ : str = patch_size snake_case_ : List[str] = num_channels snake_case_ : str = is_training snake_case_ : int = use_labels snake_case_ : Optional[Any] = hidden_size snake_case_ : int = num_hidden_layers snake_case_ : int = num_attention_heads snake_case_ : List[Any] = intermediate_size snake_case_ : Tuple = hidden_act snake_case_ : List[Any] = hidden_dropout_prob snake_case_ : int = attention_probs_dropout_prob snake_case_ : Union[str, Any] = type_sequence_label_size snake_case_ : Dict = initializer_range snake_case_ : List[str] = num_labels snake_case_ : Optional[Any] = scope snake_case_ : Dict = n_targets snake_case_ : int = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens snake_case_ : Union[str, Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size) snake_case_ : List[str] = num_patches + 1 + self.num_detection_tokens def _A ( self :List[str] ) -> List[Any]: '''simple docstring''' snake_case_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) snake_case_ : Dict = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) snake_case_ : Tuple = [] for i in range(self.batch_size ): snake_case_ : Tuple = {} snake_case_ : Optional[int] = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=lowerCAmelCase__ ) snake_case_ : int = torch.rand(self.n_targets , 4 , device=lowerCAmelCase__ ) labels.append(lowerCAmelCase__ ) snake_case_ : Dict = self.get_config() return config, pixel_values, labels def _A ( self :Optional[int] ) -> Dict: '''simple docstring''' return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] ) -> Any: '''simple docstring''' snake_case_ : Any = YolosModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : Tuple = model(lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def _A ( self :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = YolosForObjectDetection(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : Dict = model(pixel_values=lowerCAmelCase__ ) snake_case_ : str = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) snake_case_ : Optional[int] = model(pixel_values=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def _A ( self :List[str] ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ : int = config_and_inputs snake_case_ : str = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = (YolosModel, YolosForObjectDetection) if is_torch_available() else () a__ = ( {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False def _A ( self :int , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str]=False ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[str] = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": snake_case_ : Optional[Any] = [] for i in range(self.model_tester.batch_size ): snake_case_ : Optional[Any] = {} snake_case_ : str = torch.ones( size=(self.model_tester.n_targets,) , device=lowerCAmelCase__ , dtype=torch.long ) snake_case_ : int = torch.ones( self.model_tester.n_targets , 4 , device=lowerCAmelCase__ , dtype=torch.float ) labels.append(lowerCAmelCase__ ) snake_case_ : Any = labels return inputs_dict def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_ : Dict = YolosModelTester(self ) snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 ) def _A ( self :Optional[int] ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def _A ( self :str ) -> Union[str, Any]: '''simple docstring''' pass def _A ( self :Tuple ) -> Optional[Any]: 
'''simple docstring''' snake_case_, snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Any = model_class(lowerCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) ) def _A ( self :int ) -> Optional[Any]: '''simple docstring''' snake_case_, snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(lowerCAmelCase__ ) snake_case_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : List[Any] = [*signature.parameters.keys()] snake_case_ : List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def _A ( self :Optional[int] ) -> Any: '''simple docstring''' snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_, snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = True # in YOLOS, the seq_len is different snake_case_ : Dict = self.model_tester.expected_seq_len for model_class in self.all_model_classes: snake_case_ : Dict = True snake_case_ : Tuple = False snake_case_ : str = True snake_case_ : List[Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : str = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ : Optional[int] = True snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : int = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) snake_case_ : Any = len(lowerCAmelCase__ ) # Check attention is always last and order is fine snake_case_ : List[Any] = True snake_case_ : Optional[Any] = True snake_case_ : Any = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : Optional[Any] = 1 self.assertEqual(out_len + added_hidden_states , len(lowerCAmelCase__ ) ) snake_case_ : Dict = outputs.attentions self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _A ( self :Dict ) -> List[Any]: '''simple docstring''' def check_hidden_states_output(lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] ): snake_case_ : int = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : Tuple = 
model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : Union[str, Any] = outputs.hidden_states snake_case_ : int = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) # YOLOS has a different seq_length snake_case_ : int = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) snake_case_, snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : Optional[int] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def _A ( self :str ) -> Any: '''simple docstring''' snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*lowerCAmelCase__ ) @slow def _A ( self :Optional[int] ) -> int: '''simple docstring''' for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : List[Any] = YolosModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def __UpperCAmelCase ( )-> int: """simple docstring""" snake_case_ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class A_ (unittest.TestCase ): """simple docstring""" @cached_property def _A ( self :Tuple ) -> List[str]: '''simple docstring''' return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None @slow def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_ : str = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(lowerCAmelCase__ ) snake_case_ : Dict = self.default_image_processor snake_case_ : List[str] = prepare_img() snake_case_ : List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): snake_case_ : Optional[int] = model(inputs.pixel_values ) # verify outputs snake_case_ : int = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) snake_case_ : str = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] , device=lowerCAmelCase__ , ) snake_case_ : Optional[int] = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) ) # verify postprocessing snake_case_ : Optional[int] = image_processor.post_process_object_detection( lowerCAmelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] snake_case_ : int = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(lowerCAmelCase__ ) snake_case_ : List[str] = [75, 75, 17, 63, 17] snake_case_ : Tuple = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5] ).to(lowerCAmelCase__ ) self.assertEqual(len(results["scores"] ) , 5 ) self.assertTrue(torch.allclose(results["scores"] , 
lowerCAmelCase__ , atol=1E-4 ) ) self.assertSequenceEqual(results["labels"].tolist() , lowerCAmelCase__ ) self.assertTrue(torch.allclose(results["boxes"][0, :] , lowerCAmelCase__ ) )
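The inference pattern exercised by the slow test above, as a self-contained sketch using the same public checkpoint (requires network access and the local COCO fixture):

import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw logits/boxes into thresholded detections in image coordinates.
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
print(results["scores"], results["labels"], results["boxes"].shape)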
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class A_ : """simple docstring""" def __init__( self :Dict ) -> List[str]: '''simple docstring''' snake_case_ : int = {} def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any: '''simple docstring''' if self.graph.get(lowerCAmelCase__ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: snake_case_ : Optional[int] = [[w, v]] if not self.graph.get(lowerCAmelCase__ ): snake_case_ : Dict = [] def _A ( self :List[Any] ) -> Optional[int]: '''simple docstring''' return list(self.graph ) def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]: '''simple docstring''' if self.graph.get(lowerCAmelCase__ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCAmelCase__ ) def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str: '''simple docstring''' if s == d: return [] snake_case_ : str = [] snake_case_ : Optional[int] = [] if s == -2: snake_case_ : List[Any] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : Dict = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : str = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCAmelCase__ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : str = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCAmelCase__ ) != 0: snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : Optional[Any] = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return visited def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int: '''simple docstring''' if c == -1: snake_case_ : Any = floor(random() * 10_000 ) + 10 for i in range(lowerCAmelCase__ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): snake_case_ : Optional[Any] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 ) def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict: '''simple docstring''' snake_case_ : Union[str, Any] = deque() snake_case_ : Optional[Any] = [] if s == -2: snake_case_ : Tuple = list(self.graph )[0] d.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) while d: snake_case_ : Optional[int] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Tuple = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]: '''simple docstring''' return len(self.graph[u] ) def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]: '''simple docstring''' snake_case_ : str = [] snake_case_ : str = [] if s == -2: snake_case_ : Optional[Any] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : int = s snake_case_ : Optional[int] = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : List[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: 
stack.append(node[1] ) visited.append(node[1] ) snake_case_ : List[str] = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowerCAmelCase__ ) != 0: snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : Union[str, Any] = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return sorted_nodes def _A ( self :Dict ) -> Any: '''simple docstring''' snake_case_ : Dict = [] snake_case_ : Any = [] snake_case_ : str = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : Optional[int] = -2 snake_case_ : Any = [] snake_case_ : List[Any] = s snake_case_ : int = False snake_case_ : Optional[int] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : List[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): snake_case_ : Any = len(lowerCAmelCase__ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : Optional[int] = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ : Optional[Any] = True if len(lowerCAmelCase__ ) != 0: snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : str = False indirect_parents.append(lowerCAmelCase__ ) snake_case_ : List[str] = s snake_case_ : Optional[int] = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return list(lowerCAmelCase__ ) def _A ( self :Tuple ) -> List[str]: '''simple docstring''' snake_case_ : List[Any] = [] snake_case_ : Tuple = [] snake_case_ : List[str] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : str = -2 snake_case_ : List[str] = [] snake_case_ : List[Any] = s snake_case_ : List[str] = False snake_case_ : Dict = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : List[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): snake_case_ : Any = len(lowerCAmelCase__ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : str = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ : Tuple = True if len(lowerCAmelCase__ ) != 0: snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : Optional[int] = False indirect_parents.append(lowerCAmelCase__ ) snake_case_ : int = s snake_case_ : Union[str, Any] = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return False def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str: '''simple docstring''' snake_case_ : Optional[int] = time() self.dfs(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : Optional[Any] = time() return end - begin def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = time() self.bfs(lowerCAmelCase__ ) snake_case_ : Any = 
time() return end - begin class A_ : """simple docstring""" def __init__( self :Tuple ) -> List[str]: '''simple docstring''' snake_case_ : Optional[Any] = {} def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str: '''simple docstring''' if self.graph.get(lowerCAmelCase__ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist snake_case_ : str = [[w, v]] # add the other way if self.graph.get(lowerCAmelCase__ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist snake_case_ : List[str] = [[w, u]] def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]: '''simple docstring''' if self.graph.get(lowerCAmelCase__ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCAmelCase__ ) # the other way round if self.graph.get(lowerCAmelCase__ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowerCAmelCase__ ) def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int: '''simple docstring''' if s == d: return [] snake_case_ : Any = [] snake_case_ : Dict = [] if s == -2: snake_case_ : Optional[int] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : Tuple = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : List[str] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCAmelCase__ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : str = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCAmelCase__ ) != 0: snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : str = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return visited def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]: '''simple docstring''' if c == -1: snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10 for i in range(lowerCAmelCase__ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): snake_case_ : str = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 ) def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]: '''simple docstring''' snake_case_ : List[str] = deque() snake_case_ : Optional[Any] = [] if s == -2: snake_case_ : List[Any] = list(self.graph )[0] d.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) while d: snake_case_ : Optional[int] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return len(self.graph[u] ) def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' snake_case_ : Any = [] snake_case_ : Optional[Any] = [] snake_case_ : Optional[Any] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : Tuple = -2 snake_case_ : Optional[int] = [] snake_case_ : Tuple = s snake_case_ : Optional[Any] = False snake_case_ : Optional[int] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : 
Optional[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ : Optional[int] = True if len(lowerCAmelCase__ ) != 0: snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : Optional[int] = False indirect_parents.append(lowerCAmelCase__ ) snake_case_ : List[Any] = s snake_case_ : Dict = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return list(lowerCAmelCase__ ) def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : Optional[Any] = [] snake_case_ : int = [] snake_case_ : List[str] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : Tuple = -2 snake_case_ : int = [] snake_case_ : int = s snake_case_ : Optional[Any] = False snake_case_ : List[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ : Optional[Any] = True if len(lowerCAmelCase__ ) != 0: snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : Optional[int] = False indirect_parents.append(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = s snake_case_ : Tuple = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return False def _A ( self :Any ) -> Tuple: '''simple docstring''' return list(self.graph ) def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str: '''simple docstring''' snake_case_ : List[str] = time() self.dfs(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[Any] = time() return end - begin def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int: '''simple docstring''' snake_case_ : List[str] = time() self.bfs(lowerCAmelCase__ ) snake_case_ : Tuple = time() return end - begin
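A usage sketch for the graph classes above, assuming the obfuscated `_A` method stubs are restored to the names their own internal calls expect (`add_pair`, `dfs`, `bfs`); this is not runnable against the dump as-is:

g = A_()            # directed variant; `A_` is the dump's obfuscated class name
g.add_pair(1, 2)    # edge 1 -> 2 with default weight 1
g.add_pair(2, 3)
g.add_pair(1, 4)
print(g.dfs(1, 3))  # DFS path search from node 1 to node 3
print(g.bfs(1))     # BFS visit order starting at node 1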
'''simple docstring''' from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __lowerCamelCase : str = logging.get_logger(__name__) class A_ (a_ ): """simple docstring""" a__ = ['''input_features''', '''attention_mask'''] def __init__( self :Optional[Any] , lowerCAmelCase__ :str=80 , lowerCAmelCase__ :Optional[Any]=16_000 , lowerCAmelCase__ :List[str]=80 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :Union[str, Any]=True , **lowerCAmelCase__ :Tuple , ) -> List[str]: '''simple docstring''' super().__init__(feature_size=lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , padding_value=lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : List[str] = num_mel_bins snake_case_ : Any = do_ceptral_normalize snake_case_ : Tuple = normalize_means snake_case_ : Optional[int] = normalize_vars snake_case_ : Dict = True def _A ( self :List[Any] , lowerCAmelCase__ :np.ndarray , ) -> np.ndarray: '''simple docstring''' snake_case_ : Optional[int] = waveform * (2**15) # Kaldi compliance: 16-bit signed integers snake_case_ : Dict = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ) snake_case_ : Union[str, Any] = ta_kaldi.fbank(lowerCAmelCase__ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def _A ( lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[bool] = True , lowerCAmelCase__ :Optional[bool] = True , lowerCAmelCase__ :float = 0.0 , ) -> np.ndarray: '''simple docstring''' if normalize_means: snake_case_ : int = x[:input_length].mean(axis=0 ) snake_case_ : List[str] = np.subtract(lowerCAmelCase__ , lowerCAmelCase__ ) if normalize_vars: snake_case_ : Union[str, Any] = x[:input_length].std(axis=0 ) snake_case_ : int = np.divide(lowerCAmelCase__ , lowerCAmelCase__ ) if input_length < x.shape[0]: snake_case_ : Union[str, Any] = padding_value # make sure array is in float32 snake_case_ : Union[str, Any] = x.astype(np.floataa ) return x def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[np.ndarray] , lowerCAmelCase__ :Optional[np.ndarray] = None ) -> List[np.ndarray]: '''simple docstring''' snake_case_ : int = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(lowerCAmelCase__ , lowerCAmelCase__ , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ] def __call__( self :Union[str, Any] , lowerCAmelCase__ :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase__ :Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[bool] = None , **lowerCAmelCase__ :Optional[int] , ) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. 
Please make sure that the provided `raw_speech` input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) snake_case_ : Optional[int] = isinstance(lowerCAmelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) snake_case_ : Union[str, Any] = is_batched_numpy or ( isinstance(lowerCAmelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: snake_case_ : List[Any] = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(lowerCAmelCase__ , np.ndarray ): snake_case_ : Optional[int] = np.asarray(lowerCAmelCase__ , dtype=np.floataa ) elif isinstance(lowerCAmelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): snake_case_ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: snake_case_ : Tuple = [raw_speech] # extract fbank features snake_case_ : int = [self._extract_fbank_features(lowerCAmelCase__ ) for waveform in raw_speech] # convert into correct format for padding snake_case_ : int = BatchFeature({"input_features": features} ) snake_case_ : Optional[int] = self.pad( lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , ) # make sure list is in array format snake_case_ : str = padded_inputs.get("input_features" ) if isinstance(input_features[0] , lowerCAmelCase__ ): snake_case_ : Tuple = [np.asarray(lowerCAmelCase__ , dtype=np.floataa ) for feature in input_features] snake_case_ : Any = padded_inputs.get("attention_mask" ) if attention_mask is not None: snake_case_ : str = [np.asarray(lowerCAmelCase__ , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: snake_case_ : int = ( np.array(lowerCAmelCase__ , dtype=np.intaa ) if self._get_padding_strategies(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD else None ) snake_case_ : Dict = self.normalize( padded_inputs["input_features"] , attention_mask=lowerCAmelCase__ ) if return_tensors is not None: snake_case_ : List[str] = padded_inputs.convert_to_tensors(lowerCAmelCase__ ) return padded_inputs
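The utterance-level CMVN step above, isolated into a standalone function with both mean and variance normalization enabled (matching the extractor's defaults); statistics come from the unpadded frames only:

import numpy as np

def utterance_cmvn(x: np.ndarray, input_length: int, padding_value: float = 0.0) -> np.ndarray:
    # Subtract the per-feature mean of the valid frames.
    x = np.subtract(x, x[:input_length].mean(axis=0))
    # Divide by the per-feature std of the (mean-centered) valid frames.
    x = np.divide(x, x[:input_length].std(axis=0))
    # Reset padded frames to the padding value.
    if input_length < x.shape[0]:
        x[input_length:] = padding_value
    return x.astype(np.float32)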
'''simple docstring''' import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser __lowerCamelCase : List[str] = re.compile(R'''\s+''') def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]: """simple docstring""" return {"hash": hashlib.mda(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()} def __UpperCAmelCase ( __magic_name__ )-> str: """simple docstring""" snake_case_ : Optional[Any] = [len(__magic_name__ ) for line in example["content"].splitlines()] return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )} def __UpperCAmelCase ( __magic_name__ )-> int: """simple docstring""" snake_case_ : Optional[int] = np.mean([c.isalnum() for c in example["content"]] ) return {"alpha_frac": alpha_frac} def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple: """simple docstring""" if example["hash"] in uniques: uniques.remove(example["hash"] ) return True else: return False def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> Tuple: """simple docstring""" snake_case_ : List[str] = ["auto-generated", "autogenerated", "automatically generated"] snake_case_ : Optional[Any] = example["content"].splitlines() for _, line in zip(range(__magic_name__ ) ,__magic_name__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> Optional[Any]: """simple docstring""" snake_case_ : str = ["unit tests", "test file", "configuration file"] snake_case_ : int = example["content"].splitlines() snake_case_ : Optional[Any] = 0 snake_case_ : Any = 0 # first test for _, line in zip(range(__magic_name__ ) ,__magic_name__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test snake_case_ : Tuple = example["content"].count("\n" ) snake_case_ : int = int(coeff * nlines ) for line in lines: count_config += line.lower().count("config" ) count_test += line.lower().count("test" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def __UpperCAmelCase ( __magic_name__ )-> str: """simple docstring""" snake_case_ : List[Any] = ["def ", "class ", "for ", "while "] snake_case_ : Optional[Any] = example["content"].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]: """simple docstring""" snake_case_ : Tuple = example["content"].splitlines() snake_case_ : Tuple = 0 for line in lines: counter += line.lower().count("=" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def __UpperCAmelCase ( __magic_name__ )-> List[Any]: """simple docstring""" snake_case_ : Tuple = tokenizer(example["content"] ,truncation=__magic_name__ )["input_ids"] snake_case_ : int = len(example["content"] ) / len(__magic_name__ ) return {"ratio": ratio} def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]: """simple docstring""" snake_case_ : Union[str, Any] = {} results.update(get_hash(__magic_name__ ) ) 
results.update(line_stats(__magic_name__ ) ) results.update(alpha_stats(__magic_name__ ) ) results.update(char_token_ratio(__magic_name__ ) ) results.update(is_autogenerated(__magic_name__ ) ) results.update(is_config_or_test(__magic_name__ ) ) results.update(has_no_keywords(__magic_name__ ) ) results.update(has_few_assignments(__magic_name__ ) ) return results def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple: """simple docstring""" if not check_uniques(__magic_name__ ,__magic_name__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def __UpperCAmelCase ( __magic_name__ )-> Dict: """simple docstring""" with open(__magic_name__ ,"rb" ) as f_in: with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out: shutil.copyfileobj(__magic_name__ ,__magic_name__ ) os.unlink(__magic_name__ ) # Settings __lowerCamelCase : List[Any] = HfArgumentParser(PreprocessingArguments) __lowerCamelCase : str = parser.parse_args() if args.num_workers is None: __lowerCamelCase : List[Any] = multiprocessing.cpu_count() __lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset __lowerCamelCase : Any = time.time() __lowerCamelCase : str = load_dataset(args.dataset_name, split='''train''') print(f'''Time to load dataset: {time.time()-t_start:.2f}''') # Run preprocessing __lowerCamelCase : List[str] = time.time() __lowerCamelCase : Any = ds.map(preprocess, num_proc=args.num_workers) print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''') # Deduplicate hashes __lowerCamelCase : Any = set(ds.unique('''hash''')) __lowerCamelCase : Optional[int] = len(uniques) / len(ds) print(f'''Fraction of duplicates: {1-frac:.2%}''') # Deduplicate data and apply heuristics __lowerCamelCase : List[str] = time.time() __lowerCamelCase : Tuple = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(f'''Time to filter dataset: {time.time()-t_start:.2f}''') print(f'''Size of filtered dataset: {len(ds_filter)}''') # Deduplicate with minhash and jaccard similarity if args.near_deduplication: __lowerCamelCase : List[str] = time.time() __lowerCamelCase , __lowerCamelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''') print(f'''Size of deduplicate dataset: {len(ds_filter)}''') # Save data in batches of samples_per_file __lowerCamelCase : List[Any] = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) __lowerCamelCase : List[str] = output_dir / '''data''' data_dir.mkdir(exist_ok=True) __lowerCamelCase : int = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): __lowerCamelCase : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''') __lowerCamelCase : List[Any] = 
min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
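The exact-deduplication idea used above, as a self-contained sketch; `md5` is an assumption for the garbled hash call, and the whitespace-stripping regex matches the one at the top of the script:

import hashlib
import re

PATTERN = re.compile(r"\s+")

def content_hash(text: str) -> str:
    # Hash the content with all whitespace removed, so formatting-only
    # differences collapse to the same digest.
    return hashlib.md5(PATTERN.sub("", text).encode("utf-8")).hexdigest()

seen: set[str] = set()

def is_first_occurrence(text: str) -> bool:
    digest = content_hash(text)
    if digest in seen:
        return False
    seen.add(digest)
    return True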
'''simple docstring'''
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the canonical sorted-letter form of ``word``."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the list sharing ``my_word``'s signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
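A quick check of the signature idea, runnable without words.txt:

# Anagrams collapse to the same sorted-letter key.
assert signature("listen") == signature("silent") == "eilnst"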
'''simple docstring'''
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
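The property under test, in isolation: a prepared optimizer must survive a pickle round-trip (the dict here is an arbitrary stand-in object):

import pickle

restored = pickle.loads(pickle.dumps({"lr": 0.1}))
assert restored == {"lr": 0.1}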
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __lowerCamelCase : List[str] = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = ['''DeiTFeatureExtractor'''] __lowerCamelCase : Optional[Any] = ['''DeiTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = [ '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DeiTForImageClassification''', '''DeiTForImageClassificationWithTeacher''', '''DeiTForMaskedImageModeling''', '''DeiTModel''', '''DeiTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = [ '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDeiTForImageClassification''', '''TFDeiTForImageClassificationWithTeacher''', '''TFDeiTForMaskedImageModeling''', '''TFDeiTModel''', '''TFDeiTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys __lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
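A generic sketch of the lazy-import pattern this init file relies on; transformers' real `_LazyModule` is richer (module spec handling, `dir()` support), so this is illustrative only:

import importlib

class LazyModule:
    def __init__(self, name: str, import_structure: dict):
        self._name = name
        self._structure = import_structure

    def __getattr__(self, attr: str):
        # Import the owning submodule only on first attribute access.
        for submodule, symbols in self._structure.items():
            if attr in symbols:
                module = importlib.import_module(f"{self._name}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)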
'''simple docstring''' import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __lowerCamelCase : Any = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. __lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS) __lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') __lowerCamelCase : Any = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def __UpperCAmelCase ( __magic_name__ )-> List[Any]: """simple docstring""" snake_case_ : Tuple = None # source code of `config_class` snake_case_ : List[Any] = inspect.getsource(__magic_name__ ) snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/" ): snake_case_ : Optional[Any] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link snake_case_ : str = F'''https://huggingface.co/{ckpt_name}''' if ckpt_link == ckpt_link_from_name: snake_case_ : Dict = ckpt_name break return checkpoint def __UpperCAmelCase ( )-> Dict: """simple docstring""" snake_case_ : Optional[int] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ ) snake_case_ : Union[str, Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(__magic_name__ ) if len(__magic_name__ ) > 0: snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) ) raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
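The checkpoint-link regex above in isolation; the docstring line is a representative example, not pulled from any particular config class:

import re

_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
line = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
print(_re_checkpoint.findall(line))
# -> [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]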
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
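Assumed CLI usage, given the standard accelerate entry point that registers this parser as a subcommand (paths illustrative):

# accelerate test
# accelerate test --config_file /path/to/default_config.yaml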
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : List[str] = logging.get_logger(__name__) __lowerCamelCase : int = { '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class A_ (a_ ): """simple docstring""" a__ = '''cvt''' def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str: '''simple docstring''' super().__init__(**lowerCAmelCase__ ) snake_case_ : int = num_channels snake_case_ : int = patch_sizes snake_case_ : Optional[Any] = patch_stride snake_case_ : Dict = patch_padding snake_case_ : Tuple = embed_dim snake_case_ : Optional[int] = num_heads snake_case_ : Union[str, Any] = depth snake_case_ : Optional[int] = mlp_ratio snake_case_ : Tuple = attention_drop_rate snake_case_ : str = drop_rate snake_case_ : Tuple = drop_path_rate snake_case_ : Any = qkv_bias snake_case_ : Union[str, Any] = cls_token snake_case_ : int = qkv_projection_method snake_case_ : Any = kernel_qkv snake_case_ : Union[str, Any] = padding_kv snake_case_ : str = stride_kv snake_case_ : Dict = padding_q snake_case_ : Tuple = stride_q snake_case_ : Any = initializer_range snake_case_ : Any = layer_norm_eps
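A hedged instantiation sketch for the config above, overriding one stage-level list and reading back two stored attributes:

from transformers import CvtConfig

config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6])
print(config.depth, config.layer_norm_eps)  # [1, 2, 10] and 1e-12 by default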
656
1
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class A_ (unittest.TestCase ): """simple docstring""" def __init__( self :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any]=7 , lowerCAmelCase__ :List[str]=3 , lowerCAmelCase__ :Optional[Any]=30 , lowerCAmelCase__ :Optional[Any]=400 , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :int=[0.5, 0.5, 0.5] , lowerCAmelCase__ :List[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1 / 255 , lowerCAmelCase__ :Dict=True , ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Dict = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333} snake_case_ : int = parent snake_case_ : Optional[int] = batch_size snake_case_ : Tuple = num_channels snake_case_ : Any = min_resolution snake_case_ : Optional[Any] = max_resolution snake_case_ : Optional[int] = do_resize snake_case_ : List[str] = size snake_case_ : Optional[Any] = do_normalize snake_case_ : List[str] = image_mean snake_case_ : Tuple = image_std snake_case_ : Tuple = do_rescale snake_case_ : List[str] = rescale_factor snake_case_ : str = do_pad def _A ( self :Optional[int] ) -> Union[str, Any]: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _A ( self :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int]=False ) -> List[str]: '''simple docstring''' if not batched: snake_case_ : Dict = image_inputs[0] if isinstance(lowerCAmelCase__ , Image.Image ): snake_case_, snake_case_ : Optional[int] = image.size else: snake_case_, snake_case_ : Union[str, Any] = image.shape[1], image.shape[2] if w < h: snake_case_ : Tuple = int(self.size["shortest_edge"] * h / w ) snake_case_ : List[str] = self.size["shortest_edge"] elif w > h: snake_case_ : List[Any] = self.size["shortest_edge"] snake_case_ : Union[str, Any] = int(self.size["shortest_edge"] * w / h ) else: snake_case_ : Any = self.size["shortest_edge"] snake_case_ : str = self.size["shortest_edge"] else: snake_case_ : str = [] for image in image_inputs: snake_case_, snake_case_ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case_ : str = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0] snake_case_ : int = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = DeformableDetrImageProcessor if is_vision_available() else None def _A ( self :str ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[Any] = DeformableDetrImageProcessingTester(self ) @property def _A ( self :List[str] ) -> Dict: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _A ( self :Dict ) -> Tuple: '''simple 
docstring''' snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_rescale" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "do_pad" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) ) def _A ( self :Optional[int] ) -> Dict: '''simple docstring''' snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) snake_case_ : str = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase__ ) def _A ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' pass def _A ( self :Optional[int] ) -> int: '''simple docstring''' snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input snake_case_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) snake_case_ : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _A ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case_, snake_case_ : str = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _A ( self :Any ) -> str: '''simple docstring''' snake_case_ : str = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case_, snake_case_ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : Tuple = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values snake_case_, snake_case_ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case_ : List[str] = json.loads(f.read() ) snake_case_ : str = {"image_id": 39_769, "annotations": target} # encode them snake_case_ : List[str] = DeformableDetrImageProcessor() snake_case_ : Optional[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" ) # verify pixel values snake_case_ : List[Any] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ ) snake_case_ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) ) # verify area snake_case_ : int = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) ) # verify boxes snake_case_ : List[str] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ ) snake_case_ : int = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) ) # verify image_id snake_case_ : List[str] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) ) # verify is_crowd snake_case_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) ) # verify class_labels snake_case_ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) ) # verify orig_size snake_case_ : List[str] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) ) # verify size snake_case_ : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) ) @slow def _A ( self :Optional[int] ) -> Dict: '''simple docstring''' snake_case_ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with 
open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case_ : Optional[Any] = json.loads(f.read() ) snake_case_ : Any = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target} snake_case_ : Tuple = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case_ : Union[str, Any] = DeformableDetrImageProcessor(format="coco_panoptic" ) snake_case_ : List[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" ) # verify pixel values snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ ) snake_case_ : List[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) ) # verify area snake_case_ : Optional[int] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) ) # verify boxes snake_case_ : str = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ ) snake_case_ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) ) # verify image_id snake_case_ : Any = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) ) # verify is_crowd snake_case_ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) ) # verify class_labels snake_case_ : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) ) # verify masks snake_case_ : Tuple = 822_873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ ) # verify orig_size snake_case_ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) ) # verify size snake_case_ : Tuple = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
656
'''simple docstring''' import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets __lowerCamelCase : str = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' __lowerCamelCase : Dict = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. ''' __lowerCamelCase : int = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): """simple docstring""" def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[str] = len(references[0] ) if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )] snake_case_ : List[str] = TER( normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , ) snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
656
1
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> int: """simple docstring""" return int(input_a == input_a == 0 ) def __UpperCAmelCase ( )-> None: """simple docstring""" print("Truth Table of NOR Gate:" ) print("| Input 1 | Input 2 | Output |" ) print(F'''| 0 | 0 | {nor_gate(0 ,0 )} |''' ) print(F'''| 0 | 1 | {nor_gate(0 ,1 )} |''' ) print(F'''| 1 | 0 | {nor_gate(1 ,0 )} |''' ) print(F'''| 1 | 1 | {nor_gate(1 ,1 )} |''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
656
'''simple docstring''' from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def __UpperCAmelCase ( )-> int: """simple docstring""" snake_case_ : Any = { "repo_name": ["test_repo1", "test_repo2", "test_repo3"], "path": ["test_1.py", "test_2.py", "unit_test.py"], "content": ["a " * 20, "a " * 30, "b " * 7], } snake_case_ : int = Dataset.from_dict(__magic_name__ ) return dataset class A_ (a_ ): """simple docstring""" def _A ( self :List[str] ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = get_dataset() snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' snake_case_ : Optional[int] = get_dataset() snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 2 ) print(lowerCAmelCase__ ) self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 ) self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
656
1
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__=10 )-> Union[str, Any]: """simple docstring""" snake_case_ : Any = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __UpperCAmelCase ( __magic_name__ ,__magic_name__=10 )-> Any: """simple docstring""" snake_case_ : Optional[int] = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = os.path.join(__magic_name__ ,"schedule.bin" ) torch.save(scheduler.state_dict() ,__magic_name__ ) snake_case_ : Dict = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int ) -> Optional[int]: '''simple docstring''' self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__ ) def _A ( self :Any ) -> Dict: '''simple docstring''' snake_case_ : str = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase__ ) snake_case_ : Optional[int] = torch.tensor([0.4, 0.2, -0.5] ) snake_case_ : Any = nn.MSELoss() # No warmup, constant schedule, no gradient clipping snake_case_ : str = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): snake_case_ : List[str] = criterion(lowerCAmelCase__ , lowerCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def _A ( self :Optional[int] ) -> Tuple: '''simple docstring''' snake_case_ : Optional[Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase__ ) snake_case_ : Any = torch.tensor([0.4, 0.2, -0.5] ) snake_case_ : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping snake_case_ : int = Adafactor( params=[w] , lr=1E-2 , eps=(1E-3_0, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCAmelCase__ , weight_decay=0.0 , relative_step=lowerCAmelCase__ , scale_parameter=lowerCAmelCase__ , warmup_init=lowerCAmelCase__ , ) for _ in range(1_000 ): snake_case_ : Union[str, Any] = criterion(lowerCAmelCase__ , lowerCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class A_ (unittest.TestCase ): """simple docstring""" a__ = nn.Linear(50 , 50 ) if is_torch_available() else None a__ = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None a__ = 10 def _A ( self :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=None ) -> Dict: '''simple docstring''' self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__ , msg=lowerCAmelCase__ ) def _A ( self :str ) -> List[str]: '''simple docstring''' snake_case_ : str = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) snake_case_ : Optional[Any] = { get_constant_schedule: ({}, [1_0.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4], ), } for scheduler_func, data in scheds.items(): snake_case_, snake_case_ : Optional[int] = data snake_case_ : Optional[Any] = scheduler_func(self.optimizer , **lowerCAmelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) snake_case_ : Optional[int] = unwrap_schedule(lowerCAmelCase__ , self.num_steps ) self.assertListAlmostEqual( lowerCAmelCase__ , lowerCAmelCase__ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) snake_case_ : Dict = scheduler_func(self.optimizer , **lowerCAmelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(lowerCAmelCase__ ) # wrap to test picklability of the schedule snake_case_ : Union[str, Any] = unwrap_and_save_reload_schedule(lowerCAmelCase__ , self.num_steps ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ , msg=F'''failed for {scheduler_func} in save and reload''' ) class A_ : """simple docstring""" def __init__( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Dict = fn def __call__( self :Tuple , *lowerCAmelCase__ :str , **lowerCAmelCase__ :Optional[int] ) -> List[str]: '''simple docstring''' return self.fn(*lowerCAmelCase__ , **lowerCAmelCase__ ) @classmethod def _A ( self :Tuple , lowerCAmelCase__ :int ) -> Dict: '''simple docstring''' snake_case_ : Any = list(map(self , scheduler.lr_lambdas ) )
656
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable __lowerCamelCase : Dict = { '''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''], '''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ '''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoXJapaneseForCausalLM''', '''GPTNeoXJapaneseLayer''', '''GPTNeoXJapaneseModel''', '''GPTNeoXJapanesePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys __lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
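# Added note: the `_import_structure` dict plus `_LazyModule` defer the torch-dependent
# imports until an attribute is first accessed, so `import transformers` stays cheap even
# when the GPT-NeoX-Japanese modeling code is never used; the `TYPE_CHECKING` branch
# hands static analyzers the real symbols without paying that import cost at runtime.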
656
1
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ = 10**12 )-> int: """simple docstring""" snake_case_ : Dict = 1 snake_case_ : str = 0 snake_case_ : Dict = 1 snake_case_ : Union[str, Any] = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f'''{solution() = }''')
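# Added note (interpretation, not verified against the original source): the loop above
# appears to advance two interleaved Pell-style recurrences
#     prev += 2 * cur;  cur += 2 * prev
# jumping directly from one exact solution of the underlying quadratic Diophantine
# equation to the next, so the answer is reached in O(log(min_total)) iterations rather
# than by brute-force search.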
656
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ )-> list[int]: """simple docstring""" if length <= 0 or not isinstance(__magic_name__ ,__magic_name__ ): raise ValueError("Length must be a positive integer." ) return [n * (2 * n - 1) for n in range(__magic_name__ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
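# Added note: h(n) = n * (2n - 1) is the closed form for hexagonal numbers; because the
# comprehension starts at n = 0, the returned list is zero-led, e.g.
# hexagonal_numbers(length=5) -> [0, 1, 6, 15, 28].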
656
1
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __lowerCamelCase : int = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='''relu''') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='''relu''')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='''relu''')) classifier.add(layers.Dense(units=1, activation='''sigmoid''')) # Compiling the CNN classifier.compile( optimizer='''adam''', loss='''binary_crossentropy''', metrics=['''accuracy'''] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __lowerCamelCase : List[str] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __lowerCamelCase : List[Any] = train_datagen.flow_from_directory( '''dataset/training_set''', target_size=(64, 64), batch_size=32, class_mode='''binary''' ) __lowerCamelCase : Tuple = test_datagen.flow_from_directory( '''dataset/test_set''', target_size=(64, 64), batch_size=32, class_mode='''binary''' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('''cnn.h5''') # Part 3 - Making new predictions __lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.load_img( '''dataset/single_prediction/image.png''', target_size=(64, 64) ) __lowerCamelCase : Union[str, Any] = tf.keras.preprocessing.image.img_to_array(test_image) __lowerCamelCase : Optional[int] = np.expand_dims(test_image, axis=0) __lowerCamelCase : Optional[int] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __lowerCamelCase : Union[str, Any] = '''Normal''' if result[0][0] == 1: __lowerCamelCase : Optional[int] = '''Abnormality detected'''
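# Added data-layout sketch (assumed from `flow_from_directory` semantics; the class
# folder names below are hypothetical, only the two-subfolder structure matters):
# dataset/
#   training_set/
#     normal/     *.png
#     abnormal/   *.png
#   test_set/
#     normal/     *.png
#     abnormal/   *.png
# `flow_from_directory` infers the binary labels from these subfolder names, which is
# why the script above never reads an explicit label file.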
656

'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __lowerCamelCase : str = logging.getLogger(__name__) @dataclass class A_ : """simple docstring""" a__ = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) a__ = field( default=a_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) a__ = field( default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} ) a__ = field( default=a_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) a__ = field(default=a_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. a__ = field( default=a_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class A_ : """simple docstring""" a__ = field( metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} ) a__ = field( default=a_ , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , ) a__ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) a__ = field( default=a_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def __UpperCAmelCase ( )-> List[str]: """simple docstring""" snake_case_ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case_, snake_case_, snake_case_ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case_, snake_case_, snake_case_ : List[Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) snake_case_ : str = import_module("tasks" ) try: snake_case_ : Dict = getattr(__magic_name__ ,model_args.task_type ) snake_case_ : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
''' F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" ,__magic_name__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task snake_case_ : Any = token_classification_task.get_labels(data_args.labels ) snake_case_ : Dict[int, str] = dict(enumerate(__magic_name__ ) ) snake_case_ : List[str] = len(__magic_name__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case_ : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid={label: i for i, label in enumerate(__magic_name__ )} ,cache_dir=model_args.cache_dir ,) snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast ,) snake_case_ : Any = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path ,from_tf=bool(".ckpt" in model_args.model_name_or_path ) ,config=__magic_name__ ,cache_dir=model_args.cache_dir ,) # Get datasets snake_case_ : str = ( TokenClassificationDataset( token_classification_task=__magic_name__ ,data_dir=data_args.data_dir ,tokenizer=__magic_name__ ,labels=__magic_name__ ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,) if training_args.do_train else None ) snake_case_ : List[str] = ( TokenClassificationDataset( token_classification_task=__magic_name__ ,data_dir=data_args.data_dir ,tokenizer=__magic_name__ ,labels=__magic_name__ ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,) if training_args.do_eval else None ) def align_predictions(__magic_name__ ,__magic_name__ ) -> Tuple[List[int], List[int]]: snake_case_ : Dict = np.argmax(__magic_name__ ,axis=2 ) snake_case_, snake_case_ : Dict = preds.shape snake_case_ : Tuple = [[] for _ in range(__magic_name__ )] snake_case_ : Tuple = [[] for _ in range(__magic_name__ )] for i in range(__magic_name__ ): for j in range(__magic_name__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(__magic_name__ ) -> Dict: snake_case_, snake_case_ : int = align_predictions(p.predictions ,p.label_ids ) return { "accuracy_score": accuracy_score(__magic_name__ ,__magic_name__ ), "precision": precision_score(__magic_name__ ,__magic_name__ 
), "recall": recall_score(__magic_name__ ,__magic_name__ ), "f1": fa_score(__magic_name__ ,__magic_name__ ), } # Data collator snake_case_ : Optional[int] = DataCollatorWithPadding(__magic_name__ ,pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer snake_case_ : Tuple = Trainer( model=__magic_name__ ,args=__magic_name__ ,train_dataset=__magic_name__ ,eval_dataset=__magic_name__ ,compute_metrics=__magic_name__ ,data_collator=__magic_name__ ,) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case_ : Dict = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) snake_case_ : Tuple = trainer.evaluate() snake_case_ : int = os.path.join(training_args.output_dir ,"eval_results.txt" ) if trainer.is_world_process_zero(): with open(__magic_name__ ,"w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(" %s = %s" ,__magic_name__ ,__magic_name__ ) writer.write("%s = %s\n" % (key, value) ) results.update(__magic_name__ ) # Predict if training_args.do_predict: snake_case_ : List[str] = TokenClassificationDataset( token_classification_task=__magic_name__ ,data_dir=data_args.data_dir ,tokenizer=__magic_name__ ,labels=__magic_name__ ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.test ,) snake_case_, snake_case_, snake_case_ : str = trainer.predict(__magic_name__ ) snake_case_, snake_case_ : List[Any] = align_predictions(__magic_name__ ,__magic_name__ ) snake_case_ : Optional[Any] = os.path.join(training_args.output_dir ,"test_results.txt" ) if trainer.is_world_process_zero(): with open(__magic_name__ ,"w" ) as writer: for key, value in metrics.items(): logger.info(" %s = %s" ,__magic_name__ ,__magic_name__ ) writer.write("%s = %s\n" % (key, value) ) # Save predictions snake_case_ : Optional[Any] = os.path.join(training_args.output_dir ,"test_predictions.txt" ) if trainer.is_world_process_zero(): with open(__magic_name__ ,"w" ) as writer: with open(os.path.join(data_args.data_dir ,"test.txt" ) ,"r" ) as f: token_classification_task.write_predictions_to_file(__magic_name__ ,__magic_name__ ,__magic_name__ ) return results def __UpperCAmelCase ( __magic_name__ )-> Dict: """simple docstring""" main() if __name__ == "__main__": main()
656
'''simple docstring''' from scipy.stats import spearmanr import datasets __lowerCamelCase : str = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' __lowerCamelCase : int = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' __lowerCamelCase : List[str] = R'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): """simple docstring""" def _A ( self :str ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
656
1
'''simple docstring''' from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __UpperCAmelCase ( )-> Dict: """simple docstring""" snake_case_ : Optional[int] = HfArgumentParser(__magic_name__ ) snake_case_ : str = parser.parse_args_into_dataclasses()[0] snake_case_ : Tuple = TensorFlowBenchmark(args=__magic_name__ ) try: snake_case_ : Optional[Any] = parser.parse_args_into_dataclasses()[0] except ValueError as e: snake_case_ : Dict = "Arg --no_{0} is no longer used, please use --no-{0} instead." snake_case_ : Tuple = " ".join(str(__magic_name__ ).split(" " )[:-1] ) snake_case_ : Any = "" snake_case_ : Tuple = eval(str(__magic_name__ ).split(" " )[-1] ) snake_case_ : List[str] = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__magic_name__ ) if len(__magic_name__ ) > 0: snake_case_ : Union[str, Any] = full_error_msg + begin_error_msg + str(__magic_name__ ) raise ValueError(__magic_name__ ) benchmark.run() if __name__ == "__main__": main()
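# Added invocation sketch: `TensorFlowBenchmarkArguments` is parsed via
# `HfArgumentParser`, so options arrive as CLI flags; the flag names below are believed,
# not verified here, to match its fields:
# python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128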
656
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __lowerCamelCase : str = 128022 __lowerCamelCase : List[Any] = 128028 @require_sentencepiece class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MaMaaaTokenizer a__ = False a__ = False a__ = True def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' super().setUp() snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Optional[int] = Path(self.tmpdirname ) save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str: '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]: '''simple docstring''' return ( "This is a test", "This is a test", ) def _A ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = "</s>" snake_case_ : Union[str, Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : Any = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' pass def _A ( self :Optional[int] ) -> int: '''simple docstring''' snake_case_ : int = self.get_tokenizer() snake_case_ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , ) snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , "This is a test" ) @slow def _A ( self :Any ) -> List[Any]: '''simple docstring''' snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch 
@require_sentencepiece @require_tokenizers class A_ (unittest.TestCase ): """simple docstring""" a__ = '''facebook/m2m100_418M''' a__ = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] a__ = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def _A ( cls :str ) -> int: '''simple docstring''' snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) snake_case_ : List[str] = 1 return cls def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 ) def _A ( self :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.tokenizer.get_vocab() self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ ) def _A ( self :Any ) -> Dict: '''simple docstring''' snake_case_ : List[str] = "en" snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2] # fmt: on snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = tempfile.mkdtemp() snake_case_ : int = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(lowerCAmelCase__ ) snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ ) @require_torch def _A ( self :Optional[Any] ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = "en" snake_case_ : Tuple = "fr" snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" ) snake_case_ : Dict = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: snake_case_ : str = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : 
List[str] = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) snake_case_ : int = "zh" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def _A ( self :str ) -> int: '''simple docstring''' snake_case_ : Dict = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) snake_case_ : Tuple = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # en_XX, A, test, EOS "input_ids": [[128_022, 58, 4_183, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 128_006, } , )
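# A hedged usage sketch of the translation flow the tests above exercise. The checkpoint
# and language codes come from the test class; the rest is illustrative, not part of the
# test file (MaMaaaTokenizer in this file's imports corresponds to M2M100Tokenizer).
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

inputs = tokenizer("In my opinion, there are two levels of response.", return_tensors="pt")
# force the decoder to start with the target-language code, as the assertions above expect
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))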
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
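# A minimal sketch (an assumption, not part of the file) showing the shim in action:
# constructing the deprecated class emits a FutureWarning but otherwise behaves exactly
# like PoolFormerImageProcessor with default kwargs.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = PoolFormerFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)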
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py

REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether to make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
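# A hedged invocation sketch assembled from the argparse flags above; the script path is
# an assumption (run it from the repo root, wherever the file actually lives):
#
#   python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict
#
# With --strict, incompatible ops raise an exception; without it, they are only printed.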
'''simple docstring''' import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[Any]: """simple docstring""" snake_case_ : int = XCLIPTextConfig() # derive patch size from model name snake_case_ : List[str] = model_name.find("patch" ) snake_case_ : Optional[int] = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] ) snake_case_ : str = XCLIPVisionConfig(patch_size=__magic_name__ ,num_frames=__magic_name__ ) if "large" in model_name: snake_case_ : int = 768 snake_case_ : List[str] = 3072 snake_case_ : Any = 12 snake_case_ : Union[str, Any] = 1024 snake_case_ : int = 4096 snake_case_ : Optional[int] = 16 snake_case_ : str = 24 snake_case_ : List[Any] = 768 snake_case_ : int = 3072 if model_name == "xclip-large-patch14-16-frames": snake_case_ : List[Any] = 336 snake_case_ : Union[str, Any] = XCLIPConfig.from_text_vision_configs(__magic_name__ ,__magic_name__ ) if "large" in model_name: snake_case_ : int = 768 return config def __UpperCAmelCase ( __magic_name__ )-> Tuple: """simple docstring""" if name == "token_embedding.weight": snake_case_ : Union[str, Any] = name.replace("token_embedding.weight" ,"text_model.embeddings.token_embedding.weight" ) if name == "positional_embedding": snake_case_ : List[str] = name.replace("positional_embedding" ,"text_model.embeddings.position_embedding.weight" ) if "ln_1" in name: snake_case_ : Optional[Any] = name.replace("ln_1" ,"layer_norm1" ) if "ln_2" in name: snake_case_ : int = name.replace("ln_2" ,"layer_norm2" ) if "c_fc" in name: snake_case_ : Tuple = name.replace("c_fc" ,"fc1" ) if "c_proj" in name: snake_case_ : Dict = name.replace("c_proj" ,"fc2" ) if name.startswith("transformer.resblocks" ): snake_case_ : Union[str, Any] = name.replace("transformer.resblocks" ,"text_model.encoder.layers" ) if "attn.out_proj" in name and "message" not in name: snake_case_ : List[Any] = name.replace("attn.out_proj" ,"self_attn.out_proj" ) if "ln_final" in name: snake_case_ : Dict = name.replace("ln_final" ,"text_model.final_layer_norm" ) # visual encoder if name == "visual.class_embedding": snake_case_ : List[Any] = name.replace("visual.class_embedding" ,"vision_model.embeddings.class_embedding" ) if name == "visual.positional_embedding": snake_case_ : Optional[Any] = name.replace("visual.positional_embedding" ,"vision_model.embeddings.position_embedding.weight" ) if name.startswith("visual.transformer.resblocks" ): snake_case_ : str = name.replace("visual.transformer.resblocks" ,"vision_model.encoder.layers" ) if "visual.conv1" in name: snake_case_ : Union[str, Any] = name.replace("visual.conv1" ,"vision_model.embeddings.patch_embedding" ) if "visual.ln_pre" in name: snake_case_ : int = name.replace("visual.ln_pre" ,"vision_model.pre_layernorm" ) if "visual.ln_post" in name: snake_case_ : Optional[int] = name.replace("visual.ln_post" ,"vision_model.post_layernorm" ) if "visual.proj" in name: snake_case_ : str = name.replace("visual.proj" ,"visual_projection.weight" ) if "text_projection" in name: snake_case_ : Optional[Any] = name.replace("text_projection" ,"text_projection.weight" ) # things on top if "prompts_visual_proj" in name: snake_case_ : List[Any] = name.replace("prompts_visual_proj" ,"prompts_visual_projection" ) if "prompts_visual_ln" in name: snake_case_ 
: Optional[Any] = name.replace("prompts_visual_ln" ,"prompts_visual_layernorm" ) # mit if name == "mit.positional_embedding": snake_case_ : Union[str, Any] = name.replace("positional" ,"position" ) if name.startswith("mit.resblocks" ): snake_case_ : int = name.replace("mit.resblocks" ,"mit.encoder.layers" ) # prompts generator if name.startswith("prompts_generator.norm" ): snake_case_ : Optional[int] = name.replace("prompts_generator.norm" ,"prompts_generator.layernorm" ) return name def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): snake_case_ : List[Any] = orig_state_dict.pop(__magic_name__ ) if "attn.in_proj" in key: snake_case_ : int = key.split("." ) if key.startswith("visual" ): snake_case_ : str = key_split[3] snake_case_ : List[Any] = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: snake_case_ : List[str] = val[ :dim, : ] snake_case_ : Optional[int] = val[ dim : dim * 2, : ] snake_case_ : Tuple = val[ -dim:, : ] else: snake_case_ : Any = val[ :dim ] snake_case_ : Optional[int] = val[ dim : dim * 2 ] snake_case_ : Union[str, Any] = val[ -dim: ] else: if "weight" in key: snake_case_ : List[str] = val[ :dim, : ] snake_case_ : str = val[ dim : dim * 2, : ] snake_case_ : Any = val[ -dim:, : ] else: snake_case_ : Tuple = val[:dim] snake_case_ : Optional[int] = val[ dim : dim * 2 ] snake_case_ : Optional[Any] = val[-dim:] elif key.startswith("mit" ): snake_case_ : int = key_split[2] snake_case_ : int = config.vision_config.mit_hidden_size if "weight" in key: snake_case_ : Dict = val[:dim, :] snake_case_ : List[str] = val[dim : dim * 2, :] snake_case_ : List[Any] = val[-dim:, :] else: snake_case_ : Optional[int] = val[:dim] snake_case_ : Any = val[dim : dim * 2] snake_case_ : Optional[Any] = val[-dim:] else: snake_case_ : Optional[Any] = key_split[2] snake_case_ : int = config.text_config.hidden_size if "weight" in key: snake_case_ : str = val[:dim, :] snake_case_ : int = val[ dim : dim * 2, : ] snake_case_ : Union[str, Any] = val[-dim:, :] else: snake_case_ : Optional[int] = val[:dim] snake_case_ : Any = val[ dim : dim * 2 ] snake_case_ : str = val[-dim:] else: snake_case_ : Any = rename_key(__magic_name__ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: snake_case_ : str = val.T snake_case_ : Any = val return orig_state_dict def __UpperCAmelCase ( __magic_name__ )-> Tuple: """simple docstring""" if num_frames == 8: snake_case_ : str = "eating_spaghetti_8_frames.npy" elif num_frames == 16: snake_case_ : Union[str, Any] = "eating_spaghetti.npy" elif num_frames == 32: snake_case_ : Any = "eating_spaghetti_32_frames.npy" snake_case_ : str = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" ,filename=__magic_name__ ,repo_type="dataset" ,) snake_case_ : Union[str, Any] = np.load(__magic_name__ ) return list(__magic_name__ ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__=None ,__magic_name__=False )-> str: """simple docstring""" snake_case_ : Any = { # fully supervised kinetics-400 checkpoints "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth", "xclip-base-patch32-16-frames": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth" ), "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth", "xclip-base-patch16-16-frames": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth" ), 
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb", "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f", # fully supervised kinetics-600 checkpoints "xclip-base-patch16-kinetics-600": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth" ), "xclip-base-patch16-kinetics-600-16-frames": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth" ), "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be", # few shot "xclip-base-patch16-hmdb-2-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth" ), "xclip-base-patch16-hmdb-4-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth" ), "xclip-base-patch16-hmdb-8-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth" ), "xclip-base-patch16-hmdb-16-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth" ), "xclip-base-patch16-ucf-2-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth" ), "xclip-base-patch16-ucf-4-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth" ), "xclip-base-patch16-ucf-8-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth" ), "xclip-base-patch16-ucf-16-shot": ( "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth" ), # zero shot "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth", } snake_case_ : Union[str, Any] = model_to_url[model_name] snake_case_ : Optional[int] = 8 if "16-frames" in model_name: snake_case_ : Union[str, Any] = 16 elif "shot" in model_name: snake_case_ : Tuple = 32 snake_case_ : Optional[int] = get_xclip_config(__magic_name__ ,__magic_name__ ) snake_case_ : int = XCLIPModel(__magic_name__ ) model.eval() if "drive" in checkpoint_url: snake_case_ : Optional[int] = "pytorch_model.bin" gdown.cached_download(__magic_name__ ,__magic_name__ ,quiet=__magic_name__ ) snake_case_ : List[str] = torch.load(__magic_name__ ,map_location="cpu" )["model"] else: snake_case_ : str = torch.hub.load_state_dict_from_url(__magic_name__ )["model"] snake_case_ : Optional[Any] = convert_state_dict(__magic_name__ ,__magic_name__ ) snake_case_ : Union[str, Any] = XCLIPModel(__magic_name__ ) snake_case_, snake_case_ : Optional[int] = model.load_state_dict(__magic_name__ ,strict=__magic_name__ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() snake_case_ : str = 336 if model_name == "xclip-large-patch14-16-frames" else 224 snake_case_ : str = VideoMAEImageProcessor(size=__magic_name__ ) snake_case_ : List[str] = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32" ) snake_case_ : Optional[int] = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32" ) snake_case_ : Dict = XCLIPProcessor(image_processor=__magic_name__ ,tokenizer=__magic_name__ ) snake_case_ : Union[str, Any] = prepare_video(__magic_name__ ) snake_case_ : Dict = processor( text=["playing sports", "eating spaghetti", "go 
shopping"] ,videos=__magic_name__ ,return_tensors="pt" ,padding=__magic_name__ ) print("Shape of pixel values:" ,inputs.pixel_values.shape ) with torch.no_grad(): snake_case_ : str = model(**__magic_name__ ) # Verify outputs snake_case_ : List[str] = outputs.logits_per_video snake_case_ : Optional[int] = logits_per_video.softmax(dim=1 ) print("Probs:" ,__magic_name__ ) # kinetics-400 if model_name == "xclip-base-patch32": snake_case_ : List[str] = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": snake_case_ : Any = torch.tensor([[7.09_99E-04, 9.98_83E-01, 4.55_80E-04]] ) elif model_name == "xclip-base-patch16": snake_case_ : Tuple = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": snake_case_ : Optional[int] = torch.tensor([[7.69_37E-04, 9.97_28E-01, 1.94_73E-03]] ) elif model_name == "xclip-large-patch14": snake_case_ : int = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": snake_case_ : List[str] = torch.tensor([[3.38_77E-04, 9.99_37E-01, 2.88_88E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": snake_case_ : Union[str, Any] = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": snake_case_ : List[Any] = torch.tensor([[3.85_54E-04, 9.99_29E-01, 3.27_54E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": snake_case_ : List[str] = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": snake_case_ : Dict = torch.tensor([[7.18_90E-06, 9.99_94E-01, 5.65_59E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": snake_case_ : Tuple = torch.tensor([[1.03_20E-05, 9.99_93E-01, 6.24_35E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": snake_case_ : Optional[int] = torch.tensor([[4.13_77E-06, 9.99_90E-01, 9.83_86E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": snake_case_ : List[Any] = torch.tensor([[4.13_47E-05, 9.99_62E-01, 3.34_11E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": snake_case_ : Any = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": snake_case_ : Dict = torch.tensor([[8.58_57E-05, 9.99_28E-01, 6.32_91E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": snake_case_ : Dict = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": snake_case_ : Optional[Any] = torch.tensor([[9.82_19E-04, 9.95_93E-01, 3.08_63E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": snake_case_ : Union[str, Any] = torch.tensor([[3.50_82E-04, 9.97_85E-01, 1.79_66E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__magic_name__ ) if push_to_hub: print("Pushing model, processor and slow tokenizer files to the hub..." 
) model.push_to_hub(__magic_name__ ,organization="nielsr" ) processor.push_to_hub(__magic_name__ ,organization="nielsr" ) slow_tokenizer.push_to_hub(__magic_name__ ,organization="nielsr" ) if __name__ == "__main__": __lowerCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''xclip-base-patch32''', type=str, help='''Name of the model.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) __lowerCamelCase : str = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
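# A hedged invocation sketch using the flags defined above; the script filename is an
# assumption:
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub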
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) __lowerCamelCase : List[str] = ['''names''', '''prefix'''] __lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] __lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines'''] __lowerCamelCase : Optional[Any] = ['''date_format'''] @dataclass class A_ (datasets.BuilderConfig ): """simple docstring""" a__ = "," a__ = None a__ = "infer" a__ = None a__ = None a__ = None a__ = None a__ = None a__ = True a__ = None a__ = None a__ = None a__ = None a__ = False a__ = None a__ = None a__ = None a__ = True a__ = True a__ = False a__ = True a__ = None a__ = "." a__ = None a__ = '"' a__ = 0 a__ = None a__ = None a__ = None a__ = None a__ = True a__ = True a__ = 0 a__ = True a__ = False a__ = None a__ = 10000 a__ = None a__ = "strict" a__ = "error" a__ = None def _A ( self :List[str] ) -> Any: '''simple docstring''' if self.delimiter is not None: snake_case_ : Tuple = self.delimiter if self.column_names is not None: snake_case_ : List[Any] = self.column_names @property def _A ( self :Optional[Any] ) -> int: '''simple docstring''' snake_case_ : Optional[int] = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del 
pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A_ (datasets.ArrowBasedBuilder ): """simple docstring""" a__ = CsvConfig def _A ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]: '''simple docstring''' if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): snake_case_ : int = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : List[str] = [files] snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] snake_case_ : str = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : str = [files] snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table: '''simple docstring''' if self.config.features is not None: snake_case_ : int = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str snake_case_ : str = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' ) raise
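# A hedged usage sketch: this builder backs the "csv" loader in datasets, so CsvConfig
# fields can be passed straight through load_dataset. File names are illustrative.
from datasets import load_dataset

ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",", quotechar='"')
print(ds["train"][0])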
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
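# A hedged usage sketch for the pipeline above; the checkpoint id is an assumption (any
# unconditional DDPM/DDIM-compatible checkpoint should work).
from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
sample = pipe(batch_size=1, num_inference_steps=50, eta=0.0,
              generator=torch.Generator().manual_seed(0)).images[0]
sample.save("ddim_sample.png")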
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MgpstrTokenizer a__ = False a__ = {} a__ = False def _A ( self :List[str] ) -> List[str]: '''simple docstring''' super().setUp() # fmt: off snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str: '''simple docstring''' snake_case_ : Dict = "tester" snake_case_ : Tuple = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def _A ( self :Dict ) -> str: '''simple docstring''' pass def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : Tuple = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 1 ) snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) self.assertTrue(special_token not in decoded ) def _A ( self :int ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertNotEqual(len(lowerCAmelCase__ ) , 0 ) snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def _A ( self :Union[str, Any] ) -> Any: '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def _A ( self :int ) -> Dict: '''simple docstring''' pass
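# A hedged round-trip sketch mirroring the assertions above; it assumes it runs inside
# the test class (self.get_tokenizer comes from the shared tester mixin) and that decode
# may reinsert spaces, hence the replace() before comparing.
tokenizer = self.get_tokenizer()
tokens = tokenizer.tokenize("tester")            # character-level pieces
ids = tokenizer.convert_tokens_to_ids(tokens)
decoded = tokenizer.decode(ids)
assert decoded.replace(" ", "") == "tester"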
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including num using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            # every multiple of p starting at p*p is composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
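# A quick worked example: sieving up to 30 keeps exactly the primes in that range.
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]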
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For each query vector, return the nearest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            # keep the closest vector seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
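# A small worked example for the helpers above (values chosen so the answer is easy to
# check by hand): the nearest neighbour of [1, 2] among the three rows is [1, 1].
dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
queries = np.array([[1.0, 2.0]])
print(similarity_search(dataset, queries))  # [[[1.0, 1.0], 1.0]]
print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 0.0])))  # 1.0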
'''simple docstring''' import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class A_ : """simple docstring""" def __init__( self :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int=99 , lowerCAmelCase__ :Tuple=13 , lowerCAmelCase__ :Any=7 , lowerCAmelCase__ :List[Any]=9 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Union[str, Any]=5 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :Tuple=37 , lowerCAmelCase__ :Dict=8 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :str=0.0_0_2 , lowerCAmelCase__ :str=1 , lowerCAmelCase__ :str=0 , lowerCAmelCase__ :Optional[int]=0 , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Union[str, Any]=None , ) -> Dict: '''simple docstring''' snake_case_ : Dict = parent snake_case_ : Union[str, Any] = batch_size snake_case_ : Dict = encoder_seq_length snake_case_ : Dict = decoder_seq_length # For common tests snake_case_ : Any = self.decoder_seq_length snake_case_ : Tuple = is_training snake_case_ : Optional[int] = use_attention_mask snake_case_ : Any = use_labels snake_case_ : Any = vocab_size snake_case_ : Any = hidden_size snake_case_ : Tuple = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Any = d_ff snake_case_ : Dict = relative_attention_num_buckets snake_case_ : Any = dropout_rate snake_case_ : int = initializer_factor snake_case_ : Union[str, Any] = eos_token_id snake_case_ : List[Any] = pad_token_id snake_case_ : List[Any] = decoder_start_token_id snake_case_ : Tuple = None snake_case_ : Tuple = decoder_layers def _A ( self :Optional[int] ) -> Tuple: '''simple docstring''' return TaConfig.from_pretrained("google/umt5-base" ) def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :Optional[int]=None , ) -> Union[str, Any]: '''simple docstring''' if attention_mask is None: snake_case_ : List[Any] = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: snake_case_ : Any = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: snake_case_ : Optional[int] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCAmelCase__ ) if decoder_head_mask is None: snake_case_ : Optional[int] = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCAmelCase__ ) if cross_attn_head_mask is None: snake_case_ : Dict = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=lowerCAmelCase__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def _A ( self :List[Any] ) -> Tuple: '''simple docstring''' snake_case_ : Optional[int] = 
ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input snake_case_ : Any = input_ids.clamp(self.pad_token_id + 1 ) snake_case_ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 ) snake_case_ : Tuple = self.get_config() snake_case_ : Dict = config.num_attention_heads snake_case_ : Optional[Any] = self.prepare_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return config, input_dict def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_, snake_case_ : List[str] = self.prepare_config_and_inputs() return config, inputs_dict def _A ( self :Tuple ) -> List[str]: '''simple docstring''' return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _A ( self :List[Any] ) -> int: '''simple docstring''' return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int , ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = UMTaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : Optional[Any] = model( input_ids=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , ) snake_case_ : Dict = model(input_ids=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ ) snake_case_ : Dict = result.last_hidden_state snake_case_ : Dict = result.past_key_values snake_case_ : List[Any] = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(lowerCAmelCase__ ) , config.num_layers ) # There should be a 
self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def _A ( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , ) -> int: '''simple docstring''' snake_case_ : str = UMTaModel(config=lowerCAmelCase__ ).get_decoder().to(lowerCAmelCase__ ).eval() # first forward pass snake_case_ : int = model(lowerCAmelCase__ , use_cache=lowerCAmelCase__ ) snake_case_ : Optional[int] = model(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = model(lowerCAmelCase__ , use_cache=lowerCAmelCase__ ) self.parent.assertTrue(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) ) self.parent.assertTrue(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) + 1 ) snake_case_, snake_case_ : Optional[Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids snake_case_ : List[str] = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and snake_case_ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case_ : List[str] = model(lowerCAmelCase__ )["last_hidden_state"] snake_case_ : str = model(lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )["last_hidden_state"] # select random slice snake_case_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case_ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach() snake_case_ : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :int , ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = UMTaModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).half().eval() snake_case_ : Union[str, Any] = model(**lowerCAmelCase__ )["last_hidden_state"] self.parent.assertFalse(torch.isnan(lowerCAmelCase__ ).any().item() ) @require_torch class A_ (a_ , a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a__ = (UMTaForConditionalGeneration,) if is_torch_available() else () a__ = ( { '''conversational''': UMTaForConditionalGeneration, '''feature-extraction''': UMTaModel, '''summarization''': UMTaForConditionalGeneration, '''text2text-generation''': UMTaForConditionalGeneration, '''translation''': UMTaForConditionalGeneration, '''question-answering''': UMTaForQuestionAnswering, } if is_torch_available() else {} ) a__ = True a__ = False a__ = False a__ = True a__ = True # The small UMT5 model needs higher percentages for CPU/MP tests a__ = [0.8, 0.9] def _A ( self :List[Any] ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = UMTaModelTester(self ) @unittest.skip("Test has a segmentation fault on torch 1.8.0" ) def _A ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() snake_case_ : Optional[int] = UMTaModel(config_and_inputs[0] ).to(lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( lowerCAmelCase__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'''{tmpdirname}/t5_test.onnx''' , export_params=lowerCAmelCase__ , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , ) 
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def _A ( self :Optional[Any] ) -> str: '''simple docstring''' snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = ["encoder_attentions", "decoder_attentions", "cross_attentions"] snake_case_ : int = self.model_tester.prepare_config_and_inputs() snake_case_ : List[Any] = config_and_inputs[0] snake_case_ : Any = UMTaForConditionalGeneration(lowerCAmelCase__ ).eval() model.to(lowerCAmelCase__ ) snake_case_ : Optional[int] = { "head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCAmelCase__ ), "decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCAmelCase__ ), "cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCAmelCase__ ), } for attn_name, (name, mask) in zip(lowerCAmelCase__ , head_masking.items() ): snake_case_ : Dict = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": snake_case_ : List[Any] = torch.ones( config.num_decoder_layers , config.num_heads , device=lowerCAmelCase__ ) snake_case_ : Any = model.generate( config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCAmelCase__ , return_dict_in_generate=lowerCAmelCase__ , **lowerCAmelCase__ , ) # We check the state of decoder_attentions and cross_attentions just from the last step snake_case_ : List[Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("Does not work on the tiny model as we keep hitting edge cases." ) def _A ( self :int ) -> Dict: '''simple docstring''' pass @require_torch @require_sentencepiece @require_tokenizers class A_ (unittest.TestCase ): """simple docstring""" @slow @unittest.skip( "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" ) def _A ( self :str ) -> Dict: '''simple docstring''' snake_case_ : List[Any] = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCAmelCase__ ).to(lowerCAmelCase__ ) snake_case_ : Dict = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCAmelCase__ , legacy=lowerCAmelCase__ ) snake_case_ : List[str] = [ "Bonjour monsieur <extra_id_0> bien <extra_id_1>.", "No se como puedo <extra_id_0>.", "This is the reason why we <extra_id_0> them.", "The <extra_id_0> walks in <extra_id_1>, seats", "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.", ] snake_case_ : Any = tokenizer(lowerCAmelCase__ , return_tensors="pt" , padding=lowerCAmelCase__ ).input_ids # fmt: off snake_case_ : List[Any] = torch.tensor( [ [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : Tuple = model.generate(input_ids.to(lowerCAmelCase__ ) ) snake_case_ : str = [ "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>", "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>", ] snake_case_ : List[Any] = tokenizer.batch_decode(lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
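# A hedged end-to-end sketch with the checkpoint used in the integration test above;
# class names follow the imports at the top of this file, and the sentinel-token prompt
# mirrors the test inputs.
model = UMTaForConditionalGeneration.from_pretrained("google/umt5-small")
tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
ids = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt").input_ids
print(tokenizer.batch_decode(model.generate(ids)))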
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
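# A hedged CLI sketch: fire exposes calculate_rouge_path's signature directly, and extra
# flags are forwarded to calculate_rouge. The script and file names are assumptions.
#
#   python rouge_cli.py preds.txt targets.txt --save_path metrics.json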
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}


class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
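# A hedged usage sketch: building a backbone-style config and reading the aligned output
# features. Defaults mirror the signature above; out_features names come from stage_names.
config = NatConfig(out_features=["stage2", "stage4"])
print(config.num_layers)    # 4, inferred from len(depths)
print(config.out_features)  # ["stage2", "stage4"]
print(config.out_indices)   # aligned to stage_names, i.e. [2, 4]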
'''simple docstring'''

import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    ConditionalDetrConfig,
    ConditionalDetrForObjectDetection,
    ConditionalDetrForSegmentation,
    ConditionalDetrImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Optional[Any] = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight'''))
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias'''))
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias'''))

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ('''input_proj.weight''', '''input_projection.weight'''),
        ('''input_proj.bias''', '''input_projection.bias'''),
        ('''query_embed.weight''', '''query_position_embeddings.weight'''),
        ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
        ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
        ('''class_embed.weight''', '''class_labels_classifier.weight'''),
        ('''class_embed.bias''', '''class_labels_classifier.bias'''),
        ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
        ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
        ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
        ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
        ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
        ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
        ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
        ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
        ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
        ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
        ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
        ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
        ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
        ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
        ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
        ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
    ]
)


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
    """simple docstring"""
    snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ )
    snake_case_ : Any = val


def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : Any = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
            snake_case_ : int = value
        else:
            snake_case_ : int = value
    return new_state_dict


def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]:
    """simple docstring"""
    snake_case_ : str = ""
    if is_panoptic:
        snake_case_ : Dict = "conditional_detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        snake_case_ : Tuple = in_proj_weight[:256, :]
        snake_case_ : List[Any] = in_proj_bias[:256]
        snake_case_ : Optional[Any] = in_proj_weight[256:512, :]
        snake_case_ : Optional[int] = in_proj_bias[256:512]
        snake_case_ : Optional[int] = in_proj_weight[-256:, :]
        snake_case_ : str = in_proj_bias[-256:]


def __UpperCAmelCase ( )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
    return im


@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
    """simple docstring"""
    snake_case_ : Optional[Any] = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        snake_case_ : Optional[Any] = "resnet101"
    if "dc5" in model_name:
        snake_case_ : List[str] = True
    snake_case_ : Tuple = "panoptic" in model_name
    if is_panoptic:
        snake_case_ : List[Any] = 250
    else:
        snake_case_ : Optional[Any] = 91
        snake_case_ : Optional[int] = "huggingface/label-files"
        snake_case_ : Dict = "coco-detection-id2label.json"
        snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
        snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()}
        snake_case_ : int = idalabel
        snake_case_ : Dict = {v: k for k, v in idalabel.items()}
    # load image processor
    snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
    snake_case_ : str = ConditionalDetrImageProcessor(format=__magic_name__ )
    # prepare image
    snake_case_ : str = prepare_img()
    snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" )
    snake_case_ : Union[str, Any] = encoding["pixel_values"]
    logger.info(F'''Converting model {model_name}...''' )
    # load original model from torch hub
    snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,__magic_name__ ,pretrained=__magic_name__ ).eval()
    snake_case_ : Any = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            snake_case_ : Any = "conditional_detr." + src
        rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
    snake_case_ : Tuple = rename_backbone_keys(__magic_name__ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(__magic_name__ ,is_panoptic=__magic_name__ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    snake_case_ : int = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                snake_case_ : Any = state_dict.pop(__magic_name__ )
                snake_case_ : Optional[int] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                snake_case_ : Tuple = state_dict.pop(__magic_name__ )
                snake_case_ : Any = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                snake_case_ : Union[str, Any] = state_dict.pop(__magic_name__ )
                snake_case_ : List[Any] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                snake_case_ : Any = state_dict.pop(__magic_name__ )
                snake_case_ : List[Any] = val
    # finally, create HuggingFace model and load state dict
    snake_case_ : Optional[int] = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
    model.load_state_dict(__magic_name__ )
    model.eval()
    model.push_to_hub(repo_id=__magic_name__ ,organization="DepuMeng" ,commit_message="Add model" )
    # verify our conversion
    snake_case_ : Dict = conditional_detr(__magic_name__ )
    snake_case_ : Union[str, Any] = model(__magic_name__ )
    assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )
    # Save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
    model.save_pretrained(__magic_name__ )
    image_processor.save_pretrained(__magic_name__ )


if __name__ == "__main__":
    __lowerCamelCase : Tuple = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_name''',
        default='''conditional_detr_resnet50''',
        type=str,
        help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    __lowerCamelCase : int = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
656
1
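
A minimal illustrative sketch (editor's addition, not one of the dataset rows): the conversion script above renames checkpoint keys by popping the value under the original name and reassigning it under the new one. The key names below are made up for the example.

# rename pattern used by rename_key() in the row above; keys are illustrative
state_dict = {"transformer.encoder.layers.0.linear1.weight": 0.0}
src, dest = "transformer.encoder.layers.0.linear1.weight", "encoder.layers.0.fc1.weight"
state_dict[dest] = state_dict.pop(src)
assert dest in state_dict and src not in state_dict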
'''simple docstring'''

import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpta import GPTaTokenizer


class A_ (tf.keras.layers.Layer ):
    """simple docstring"""

    def __init__( self :Optional[int] , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int = None , lowerCAmelCase__ :int = None ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__()
        snake_case_ : Optional[int] = pad_token_id
        snake_case_ : List[str] = max_length
        snake_case_ : Tuple = vocab
        snake_case_ : int = merges
        snake_case_ : Tuple = BytePairTokenizer(lowerCAmelCase__ , lowerCAmelCase__ , sequence_length=lowerCAmelCase__ )

    @classmethod
    def _A ( cls :str , lowerCAmelCase__ :GPTaTokenizer , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :int ) -> Optional[int]:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = [" ".join(lowerCAmelCase__ ) for m in tokenizer.bpe_ranks.keys()]
        snake_case_ : Optional[Any] = tokenizer.get_vocab()
        return cls(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )

    @classmethod
    def _A ( cls :str , lowerCAmelCase__ :Union[str, os.PathLike] , *lowerCAmelCase__ :str , **lowerCAmelCase__ :List[str] ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ : List[str] = GPTaTokenizer.from_pretrained(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )
        return cls.from_tokenizer(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ )

    @classmethod
    def _A ( cls :Tuple , lowerCAmelCase__ :Optional[int] ) -> Dict:
        '''simple docstring'''
        return cls(**lowerCAmelCase__ )

    def _A ( self :List[str] ) -> int:
        '''simple docstring'''
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def _A ( self :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int = None ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ : Optional[Any] = self.tf_tokenizer(lowerCAmelCase__ )
        snake_case_ : Tuple = tf.ones_like(lowerCAmelCase__ )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            snake_case_ : Any = max_length if max_length is not None else self.max_length
            if max_length is not None:
                snake_case_, snake_case_ : List[str] = pad_model_inputs(
                    lowerCAmelCase__ , max_seq_length=lowerCAmelCase__ , pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
656
'''simple docstring'''

import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImgaImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class A_ (unittest.TestCase ):
    """simple docstring"""

    def _A ( self :Any ) -> str:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _A ( self :List[Any] ) -> List[str]:
        '''simple docstring'''
        snake_case_ : Any = 1
        snake_case_ : Dict = 3
        snake_case_ : Union[str, Any] = (32, 32)
        snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
        return image

    @property
    def _A ( self :Optional[int] ) -> Any:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case_ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        return model

    @property
    def _A ( self :Dict ) -> Any:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case_ : Optional[Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def _A ( self :Dict ) -> Optional[int]:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case_ : str = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
        return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )

    @property
    def _A ( self :Any ) -> str:
        '''simple docstring'''

        def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ):
            class A_ :
                """simple docstring"""

                def __init__( self :Optional[int] ) -> List[str]:
                    '''simple docstring'''
                    snake_case_ : str = torch.ones([0] )

                def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple:
                    '''simple docstring'''
                    self.pixel_values.to(lowerCAmelCase__ )
                    return self

            return Out()

        return extract

    def _A ( self :int ) -> Dict:
        '''simple docstring'''
        snake_case_ : str = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : str = self.dummy_cond_unet
        snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
        snake_case_ : Dict = self.dummy_vae
        snake_case_ : Dict = self.dummy_text_encoder
        snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        snake_case_ : str = 77
        snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ )
        snake_case_ : Tuple = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline(
            unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
        snake_case_ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
        alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        snake_case_ : Dict = "A painting of a squirrel eating a burger"
        snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
        snake_case_ : Dict = alt_pipe(
            [prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , )
        snake_case_ : Any = output.images
        snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
        snake_case_ : Optional[Any] = alt_pipe(
            [prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
        snake_case_ : Tuple = image[0, -3:, -3:, -1]
        snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _A ( self :int ) -> List[str]:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = self.dummy_cond_unet
        snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
        snake_case_ : int = self.dummy_vae
        snake_case_ : List[Any] = self.dummy_text_encoder
        snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        snake_case_ : int = 77
        snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ )
        # put models in fp16
        snake_case_ : Optional[Any] = unet.half()
        snake_case_ : Tuple = vae.half()
        snake_case_ : List[str] = bert.half()
        # make sure here that pndm scheduler skips prk
        snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline(
            unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
        snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ )
        alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        snake_case_ : List[Any] = "A painting of a squirrel eating a burger"
        snake_case_ : str = torch.manual_seed(0 )
        snake_case_ : Any = alt_pipe(
            [prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images
        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _A ( self :Optional[int] ) -> Any:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        snake_case_ : str = init_image.resize((760, 504) )
        snake_case_ : Optional[Any] = "BAAI/AltDiffusion"
        snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained(
            lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()
        snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
        snake_case_ : int = torch.manual_seed(0 )
        snake_case_ : List[str] = pipe(
            prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
        snake_case_ : str = output.images[0]
        snake_case_ : List[Any] = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
    """simple docstring"""

    def _A ( self :Optional[Any] ) -> Optional[int]:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _A ( self :str ) -> Any:
        '''simple docstring'''
        snake_case_ : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        snake_case_ : List[Any] = init_image.resize((768, 512) )
        snake_case_ : Tuple = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        snake_case_ : Any = "BAAI/AltDiffusion"
        snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained(
            lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()
        snake_case_ : Tuple = "A fantasy landscape, trending on artstation"
        snake_case_ : Tuple = torch.manual_seed(0 )
        snake_case_ : List[Any] = pipe(
            prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , )
        snake_case_ : Optional[int] = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
656
1
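
A small sketch (editor's addition, not a dataset row) of the pad-to-max-length behaviour the tokenizer layer above delegates to pad_model_inputs. This is a plain-Python stand-in, not the tensorflow_text implementation, and the token ids are illustrative.

def pad_to_max(ids, max_length, pad_value):
    # right-pad the ids and build the matching attention mask
    mask = [1] * len(ids) + [0] * (max_length - len(ids))
    return ids + [pad_value] * (max_length - len(ids)), mask

input_ids, attention_mask = pad_to_max([15496, 995], max_length=4, pad_value=50256)
assert input_ids == [15496, 995, 50256, 50256] and attention_mask == [1, 1, 0, 0]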
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


__lowerCamelCase : Optional[Any] = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[str] = ['''BeitFeatureExtractor''']
    __lowerCamelCase : Tuple = ['''BeitImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Optional[int] = [
        '''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BeitForImageClassification''',
        '''BeitForMaskedImageModeling''',
        '''BeitForSemanticSegmentation''',
        '''BeitModel''',
        '''BeitPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[Any] = [
        '''FlaxBeitForImageClassification''',
        '''FlaxBeitForMaskedImageModeling''',
        '''FlaxBeitModel''',
        '''FlaxBeitPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    __lowerCamelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
656
'''simple docstring'''

import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    Pipeline,
    ZeroShotClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
__lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}


@is_pipeline_test
class A_ (unittest.TestCase ):
    """simple docstring"""

    a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        a__ = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any:
        '''simple docstring'''
        snake_case_ : Optional[Any] = ZeroShotClassificationPipeline(
            model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["polics", "health"] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]

    def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
        self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
        # No kwarg
        snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] )
        self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
        snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
        self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
        snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
        self.assertEqual(
            lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
        self.assertEqual(
            lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        snake_case_ : str = classifier(
            "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
        self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} )
        # https://github.com/huggingface/transformers/issues/13846
        snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] )
        self.assertEqual(
            lowerCAmelCase__ ,
            [
                {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
                for i in range(1 )
            ] , )
        snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
        self.assertEqual(
            lowerCAmelCase__ ,
            [
                {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(lowerCAmelCase__ ):
            classifier("" , candidate_labels="politics" )
        with self.assertRaises(lowerCAmelCase__ ):
            classifier(lowerCAmelCase__ , candidate_labels="politics" )
        with self.assertRaises(lowerCAmelCase__ ):
            classifier("Who are you voting for in 2020?" , candidate_labels="" )
        with self.assertRaises(lowerCAmelCase__ ):
            classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ )
        with self.assertRaises(lowerCAmelCase__ ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
        with self.assertRaises(lowerCAmelCase__ ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , )
        self.run_entailment_id(lowerCAmelCase__ )

    def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case_ : int = zero_shot_classifier.model.config
        snake_case_ : Optional[int] = config.labelaid
        snake_case_ : Tuple = zero_shot_classifier.entailment_id
        snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        snake_case_ : List[str] = original_labelaid
        self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id )

    @require_torch
    def _A ( self :Tuple ) -> Any:
        '''simple docstring'''
        snake_case_ : List[Any] = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )

    @require_torch
    def _A ( self :Optional[Any] ) -> Tuple:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        snake_case_ : int = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) ,
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    @require_tf
    def _A ( self :Union[str, Any] ) -> Dict:
        '''simple docstring'''
        snake_case_ : List[str] = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
        snake_case_ : Optional[int] = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) ,
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } , )

    @slow
    @require_torch
    def _A ( self :Union[str, Any] ) -> int:
        '''simple docstring'''
        snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        snake_case_ : str = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) ,
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )
        snake_case_ : Optional[int] = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." ,
            candidate_labels=["machine learning", "statistics", "translation", "vision"] ,
            multi_label=lowerCAmelCase__ , )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) ,
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )

    @slow
    @require_tf
    def _A ( self :List[str] ) -> str:
        '''simple docstring'''
        snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
        snake_case_ : Optional[Any] = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) ,
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } , )
        snake_case_ : Tuple = zero_shot_classifier(
            "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
            " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
            " through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
            " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
            " machine translation tasks show these models to be superior in quality while being more parallelizable"
            " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
            " English-to-German translation task, improving over the existing best results, including ensembles by"
            " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
            " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
            " fraction of the training costs of the best models from the literature. We show that the Transformer"
            " generalizes well to other tasks by applying it successfully to English constituency parsing both with"
            " large and limited training data." ,
            candidate_labels=["machine learning", "statistics", "translation", "vision"] ,
            multi_label=lowerCAmelCase__ , )
        self.assertEqual(
            nested_simplify(lowerCAmelCase__ ) ,
            {
                "sequence": (
                    "The dominant sequence transduction models are based on complex recurrent or convolutional neural"
                    " networks in an encoder-decoder configuration. The best performing models also connect the"
                    " encoder and decoder through an attention mechanism. We propose a new simple network"
                    " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
                    " and convolutions entirely. Experiments on two machine translation tasks show these models to be"
                    " superior in quality while being more parallelizable and requiring significantly less time to"
                    " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
                    " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
                    " English-to-French translation task, our model establishes a new single-model state-of-the-art"
                    " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
                    " costs of the best models from the literature. We show that the Transformer generalizes well to"
                    " other tasks by applying it successfully to English constituency parsing both with large and"
                    " limited training data."
                ),
                "labels": ["translation", "machine learning", "vision", "statistics"],
                "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } , )
656
1
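
An illustrative sketch (editor's addition, not a dataset row) of the mechanism the zero-shot tests above exercise: each candidate label is turned into an NLI hypothesis via a template, and the model's entailment probability becomes that label's score. The template string below is an assumption for illustration, not taken from the rows above.

sequence = "Who are you voting for in 2020?"
candidate_labels = ["politics", "public health", "science"]
hypothesis_template = "This example is {}."  # assumed template, for illustration only
pairs = [(sequence, hypothesis_template.format(label)) for label in candidate_labels]
# each (premise, hypothesis) pair is scored by an NLI model;
# entailment probabilities are normalized into the "scores" list seen in the tests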
'''simple docstring'''

from ..utils import DummyObject, requires_backends


class A_ (metaclass=a_ ):
    """simple docstring"""

    a__ = ['''transformers''', '''torch''', '''note_seq''']

    def __init__( self :Any , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :List[Any] ) -> Dict:
        '''simple docstring'''
        requires_backends(self , ["transformers", "torch", "note_seq"] )

    @classmethod
    def _A ( cls :Union[str, Any] , *lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :int ) -> int:
        '''simple docstring'''
        requires_backends(cls , ["transformers", "torch", "note_seq"] )

    @classmethod
    def _A ( cls :List[Any] , *lowerCAmelCase__ :Optional[int] , **lowerCAmelCase__ :List[Any] ) -> Dict:
        '''simple docstring'''
        requires_backends(cls , ["transformers", "torch", "note_seq"] )
656
'''simple docstring'''

import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
    raise Exception('''requires fairseq >= 1.0.0a''')

logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

__lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip'''


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ )
    roberta.eval()  # disable dropout
    snake_case_ : Dict = roberta.model.encoder.sentence_encoder
    snake_case_ : List[str] = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,
        hidden_size=roberta.cfg.model.encoder_embed_dim ,
        num_hidden_layers=roberta.cfg.model.encoder_layers ,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads ,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,
        max_position_embeddings=514 ,
        type_vocab_size=1 ,
        layer_norm_eps=1E-5 ,)
    if classification_head:
        snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:" ,__magic_name__ )

    snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ )
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight
    snake_case_ : int = roberta_sent_encoder.embed_positions.weight
    snake_case_ : Union[str, Any] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.
    snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
    snake_case_ : str = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        snake_case_ : BertLayer = model.roberta.encoder.layer[i]
        snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        snake_case_ : RobertaAttention = layer.attention
        snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight
        snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias

        # self attention
        snake_case_ : BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )
        snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight
        snake_case_ : Any = roberta_layer.self_attn.q_proj.bias
        snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight
        snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias
        snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight
        snake_case_ : Any = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        snake_case_ : BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight
        snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        snake_case_ : int = roberta_layer.final_layer_norm.weight
        snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias

        # intermediate
        snake_case_ : BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
        snake_case_ : List[str] = roberta_layer.fca.weight
        snake_case_ : List[Any] = roberta_layer.fca.bias

        # output
        snake_case_ : BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
        snake_case_ : Any = roberta_layer.fca.weight
        snake_case_ : Any = roberta_layer.fca.bias
        # end of layer

    if classification_head:
        snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight
        snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias
        snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight
        snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight
        snake_case_ : int = roberta.model.encoder.lm_head.dense.bias
        snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
        snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias
        snake_case_ : int = roberta.model.encoder.lm_head.weight
        snake_case_ : List[str] = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 )  # batch of size 1

    snake_case_ : Union[str, Any] = model(__magic_name__ )[0]
    if classification_head:
        snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) )
    else:
        snake_case_ : List[str] = roberta.model(__magic_name__ )[0]
    print(our_output.shape ,their_output.shape )
    snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F'''max_absolute_diff = {max_absolute_diff}''' )  # ~ 1e-7
    snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
    print("Do both models output the same tensors?" ,"🔥" if success else "💩" )
    if not success:
        raise Exception("Something went wRoNg" )

    pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ )
    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__magic_name__ )


if __name__ == "__main__":
    __lowerCamelCase : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
    )
    __lowerCamelCase : Tuple = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
656
1
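
A minimal sketch (editor's addition, not a dataset row) of the verification step both conversion scripts in this section perform: compare original and converted outputs elementwise within a small tolerance. The tensor values below are illustrative stand-ins.

import torch

our_output = torch.tensor([1.0000, 2.0000])
their_output = torch.tensor([1.0001, 2.0001])  # stand-in for the original model's output
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
assert torch.allclose(our_output, their_output, atol=1e-3), f"diff too large: {max_absolute_diff}"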
'''simple docstring'''

import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class A_ (a_ , a_ , unittest.TestCase ):
    """simple docstring"""

    a__ = IFInpaintingSuperResolutionPipeline
    a__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    a__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    a__ = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def _A ( self :Tuple ) -> List[str]:
        '''simple docstring'''
        return self._get_superresolution_dummy_components()

    def _A ( self :Optional[int] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> List[Any]:
        '''simple docstring'''
        if str(lowerCAmelCase__ ).startswith("mps" ):
            snake_case_ : List[Any] = torch.manual_seed(lowerCAmelCase__ )
        else:
            snake_case_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
        snake_case_ : List[str] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
        snake_case_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ )
        snake_case_ : List[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() ,
        reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def _A ( self :Union[str, Any] ) -> int:
        '''simple docstring'''
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def _A ( self :List[Any] ) -> Dict:
        '''simple docstring'''
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def _A ( self :List[Any] ) -> Optional[Any]:
        '''simple docstring'''
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def _A ( self :Tuple ) -> List[Any]:
        '''simple docstring'''
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def _A ( self :Any ) -> Dict:
        '''simple docstring'''
        self._test_save_load_local()

    def _A ( self :List[str] ) -> Any:
        '''simple docstring'''
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
656
'''simple docstring'''

import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]:
    """simple docstring"""
    snake_case_ : str = False
    snake_case_ : int = False
    if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
        snake_case_ : Any = True
    elif "IPython" in sys.modules:
        snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() )
    try:
        snake_case_ : Any = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state ) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`." )
        if num_processes is None:
            snake_case_ : Tuple = 8
        snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" )
        print(F'''Launching a training on {num_processes} TPU cores.''' )
        xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU." )
        else:
            print("Launching training on one CPU." )
        function(*__magic_name__ )
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state ) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`." )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function." )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=__magic_name__ ,master_addr="127.0.01" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ):
                snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" )
                print(F'''Launching training on {num_processes} GPUs.''' )
                try:
                    start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic." ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                snake_case_ : Any = "1"
                print("Launching training on MPS." )
            elif torch.cuda.is_available():
                print("Launching training on one GPU." )
            else:
                print("Launching training on CPU." )
            function(*__magic_name__ )


def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict:
    """simple docstring"""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=__magic_name__ ,master_addr="127.0.01" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,):
            snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ )
            start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
656
1
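
A sketch (editor's addition, not a dataset row) of the fork-based multi-process launch pattern the launcher code above builds on. The training function and process count are placeholders, and "fork" assumes a platform that supports it (e.g. Linux).

import torch.multiprocessing as mp

def training_fn(process_index):
    # start_processes passes the process index as the first positional argument
    print(f"hello from process {process_index}")

if __name__ == "__main__":
    mp.start_processes(training_fn, args=(), nprocs=2, start_method="fork")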
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A_ : """simple docstring""" def __init__( self :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]=sys.maxsize ) -> List[Any]: '''simple docstring''' snake_case_ : Union[str, Any] = "bilinear" snake_case_ : str = max_size snake_case_ : Union[str, Any] = short_edge_length def __call__( self :Tuple , lowerCAmelCase__ :Any ) -> int: '''simple docstring''' snake_case_ : Dict = [] for img in imgs: snake_case_, snake_case_ : Dict = img.shape[:2] # later: provide list and randomly choose index for resize snake_case_ : List[str] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img snake_case_ : int = size * 1.0 / min(lowerCAmelCase__ , lowerCAmelCase__ ) if h < w: snake_case_, snake_case_ : Optional[Any] = size, scale * w else: snake_case_, snake_case_ : int = scale * h, size if max(lowerCAmelCase__ , lowerCAmelCase__ ) > self.max_size: snake_case_ : str = self.max_size * 1.0 / max(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : Any = newh * scale snake_case_ : Any = neww * scale snake_case_ : Dict = int(neww + 0.5 ) snake_case_ : List[str] = int(newh + 0.5 ) if img.dtype == np.uinta: snake_case_ : Dict = Image.fromarray(lowerCAmelCase__ ) snake_case_ : List[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) snake_case_ : List[Any] = np.asarray(lowerCAmelCase__ ) else: snake_case_ : List[Any] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw snake_case_ : Optional[Any] = nn.functional.interpolate( lowerCAmelCase__ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase__ ).squeeze(0 ) img_augs.append(lowerCAmelCase__ ) return img_augs class A_ : """simple docstring""" def __init__( self :List[Any] , lowerCAmelCase__ :Dict ) -> int: '''simple docstring''' snake_case_ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) snake_case_ : Optional[Any] = cfg.INPUT.FORMAT snake_case_ : List[str] = cfg.SIZE_DIVISIBILITY snake_case_ : Tuple = cfg.PAD_VALUE snake_case_ : Tuple = cfg.INPUT.MAX_SIZE_TEST snake_case_ : Any = cfg.MODEL.DEVICE snake_case_ : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) snake_case_ : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) snake_case_ : List[Any] = lambda lowerCAmelCase__ : (x - self.pixel_mean) / self.pixel_std def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> int: '''simple docstring''' snake_case_ : Union[str, Any] = tuple(max(lowerCAmelCase__ ) for s in zip(*[img.shape for img in images] ) ) snake_case_ : List[Any] = [im.shape[-2:] for im in images] snake_case_ : Any = [ nn.functional.pad( lowerCAmelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ] return torch.stack(lowerCAmelCase__ ), torch.tensor(lowerCAmelCase__ ) def __call__( self :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any]=False ) -> Optional[int]: '''simple docstring''' with torch.no_grad(): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : Union[str, Any] = [images] if single_image: assert len(lowerCAmelCase__ ) == 1 for i in 
range(len(lowerCAmelCase__ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(lowerCAmelCase__ , images.pop(lowerCAmelCase__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( lowerCAmelCase__ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge snake_case_ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) snake_case_ : Any = self.aug(lowerCAmelCase__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic snake_case_ : str = [self.normalizer(lowerCAmelCase__ ) for x in images] # now pad them to do the following operations snake_case_, snake_case_ : List[str] = self.pad(lowerCAmelCase__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad snake_case_ : List[Any] = torch.true_divide(lowerCAmelCase__ , lowerCAmelCase__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict: """simple docstring""" assert torch.isfinite(__magic_name__ ).all(), "Box tensor contains infinite or NaN!" snake_case_, snake_case_ : str = box_size tensor[:, 0].clamp_(min=0 ,max=__magic_name__ ) tensor[:, 1].clamp_(min=0 ,max=__magic_name__ ) tensor[:, 2].clamp_(min=0 ,max=__magic_name__ ) tensor[:, 3].clamp_(min=0 ,max=__magic_name__ )
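# Added usage sketch (ours, not from the original file): the shortest-edge
# resize arithmetic used by the augmentation class above, re-derived as a
# standalone helper so the scaling and max-size cap can be checked without a
# config object. The function name is hypothetical.
def shortest_edge_resize_shape(h: int, w: int, size: int, max_size: int) -> tuple:
    # Scale so the shorter side becomes `size`, preserving the aspect ratio.
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # If the longer side overshoots, shrink both sides so it fits `max_size`.
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)


assert shortest_edge_resize_shape(480, 640, 800, 1333) == (800, 1067)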
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class A_ : """simple docstring""" def __init__( self :Dict ) -> List[str]: '''simple docstring''' snake_case_ : int = {} def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any: '''simple docstring''' if self.graph.get(lowerCAmelCase__ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: snake_case_ : Optional[int] = [[w, v]] if not self.graph.get(lowerCAmelCase__ ): snake_case_ : Dict = [] def _A ( self :List[Any] ) -> Optional[int]: '''simple docstring''' return list(self.graph ) def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]: '''simple docstring''' if self.graph.get(lowerCAmelCase__ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCAmelCase__ ) def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str: '''simple docstring''' if s == d: return [] snake_case_ : str = [] snake_case_ : Optional[int] = [] if s == -2: snake_case_ : List[Any] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : Dict = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : str = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCAmelCase__ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : str = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCAmelCase__ ) != 0: snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : Optional[Any] = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return visited def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int: '''simple docstring''' if c == -1: snake_case_ : Any = floor(random() * 10_000 ) + 10 for i in range(lowerCAmelCase__ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): snake_case_ : Optional[Any] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 ) def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict: '''simple docstring''' snake_case_ : Union[str, Any] = deque() snake_case_ : Optional[Any] = [] if s == -2: snake_case_ : Tuple = list(self.graph )[0] d.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) while d: snake_case_ : Optional[int] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Tuple = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]: '''simple docstring''' return len(self.graph[u] ) def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]: '''simple docstring''' snake_case_ : str = [] snake_case_ : str = [] if s == -2: snake_case_ : Optional[Any] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : int = s snake_case_ : Optional[int] = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : List[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: 
stack.append(node[1] ) visited.append(node[1] ) snake_case_ : List[str] = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowerCAmelCase__ ) != 0: snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : Union[str, Any] = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return sorted_nodes def _A ( self :Dict ) -> Any: '''simple docstring''' snake_case_ : Dict = [] snake_case_ : Any = [] snake_case_ : str = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : Optional[int] = -2 snake_case_ : Any = [] snake_case_ : List[Any] = s snake_case_ : int = False snake_case_ : Optional[int] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : List[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): snake_case_ : Any = len(lowerCAmelCase__ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : Optional[int] = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ : Optional[Any] = True if len(lowerCAmelCase__ ) != 0: snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : str = False indirect_parents.append(lowerCAmelCase__ ) snake_case_ : List[str] = s snake_case_ : Optional[int] = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return list(lowerCAmelCase__ ) def _A ( self :Tuple ) -> List[str]: '''simple docstring''' snake_case_ : List[Any] = [] snake_case_ : Tuple = [] snake_case_ : List[str] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : str = -2 snake_case_ : List[str] = [] snake_case_ : List[Any] = s snake_case_ : List[str] = False snake_case_ : Dict = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : List[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): snake_case_ : Any = len(lowerCAmelCase__ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : str = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ : Tuple = True if len(lowerCAmelCase__ ) != 0: snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : Optional[int] = False indirect_parents.append(lowerCAmelCase__ ) snake_case_ : int = s snake_case_ : Union[str, Any] = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return False def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str: '''simple docstring''' snake_case_ : Optional[int] = time() self.dfs(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : Optional[Any] = time() return end - begin def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = time() self.bfs(lowerCAmelCase__ ) snake_case_ : Any = 
time() return end - begin class A_ : """simple docstring""" def __init__( self :Tuple ) -> List[str]: '''simple docstring''' snake_case_ : Optional[Any] = {} def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str: '''simple docstring''' if self.graph.get(lowerCAmelCase__ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist snake_case_ : str = [[w, v]] # add the other way if self.graph.get(lowerCAmelCase__ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist snake_case_ : List[str] = [[w, u]] def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]: '''simple docstring''' if self.graph.get(lowerCAmelCase__ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCAmelCase__ ) # the other way round if self.graph.get(lowerCAmelCase__ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowerCAmelCase__ ) def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int: '''simple docstring''' if s == d: return [] snake_case_ : Any = [] snake_case_ : Dict = [] if s == -2: snake_case_ : Optional[int] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : Tuple = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : List[str] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCAmelCase__ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : str = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCAmelCase__ ) != 0: snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : str = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return visited def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]: '''simple docstring''' if c == -1: snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10 for i in range(lowerCAmelCase__ ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): snake_case_ : str = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 ) def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]: '''simple docstring''' snake_case_ : List[str] = deque() snake_case_ : Optional[Any] = [] if s == -2: snake_case_ : List[Any] = list(self.graph )[0] d.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) while d: snake_case_ : Optional[int] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return len(self.graph[u] ) def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' snake_case_ : Any = [] snake_case_ : Optional[Any] = [] snake_case_ : Optional[Any] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : Tuple = -2 snake_case_ : Optional[int] = [] snake_case_ : Tuple = s snake_case_ : Optional[Any] = False snake_case_ : Optional[int] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : 
Optional[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ : Optional[int] = True if len(lowerCAmelCase__ ) != 0: snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : Optional[int] = False indirect_parents.append(lowerCAmelCase__ ) snake_case_ : List[Any] = s snake_case_ : Dict = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return list(lowerCAmelCase__ ) def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : Optional[Any] = [] snake_case_ : int = [] snake_case_ : List[str] = list(self.graph )[0] stack.append(lowerCAmelCase__ ) visited.append(lowerCAmelCase__ ) snake_case_ : Tuple = -2 snake_case_ : int = [] snake_case_ : int = s snake_case_ : Optional[Any] = False snake_case_ : List[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: snake_case_ : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) snake_case_ : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ : Optional[Any] = True if len(lowerCAmelCase__ ) != 0: snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1] else: snake_case_ : Optional[int] = False indirect_parents.append(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = s snake_case_ : Tuple = ss # check if se have reached the starting point if len(lowerCAmelCase__ ) == 0: return False def _A ( self :Any ) -> Tuple: '''simple docstring''' return list(self.graph ) def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str: '''simple docstring''' snake_case_ : List[str] = time() self.dfs(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[Any] = time() return end - begin def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int: '''simple docstring''' snake_case_ : List[str] = time() self.bfs(lowerCAmelCase__ ) snake_case_ : Tuple = time() return end - begin
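# Added sketch (ours, not from the original file): the adjacency-list
# convention shared by the two graph classes above, reduced to a standalone
# breadth-first traversal. Each vertex maps to a list of [weight, neighbor]
# pairs and the walk uses a deque, mirroring the bfs method.
from collections import deque


def bfs_order(graph: dict, start) -> list:
    visited, queue = [start], deque([start])
    while queue:
        node = queue.popleft()
        for _weight, neighbor in graph.get(node, []):
            if neighbor not in visited:
                visited.append(neighbor)
                queue.append(neighbor)
    return visited


assert bfs_order({0: [[1, 1], [1, 2]], 1: [[1, 3]], 2: [[1, 3]], 3: []}, 0) == [0, 1, 2, 3]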
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase : int = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __lowerCamelCase : Any = 50003 __lowerCamelCase : List[str] = 50002 @require_sentencepiece @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = PLBartTokenizer a__ = None a__ = False def _A ( self :List[Any] ) -> Optional[int]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case_ : int = PLBartTokenizer(lowerCAmelCase__ , language_codes="base" , keep_accents=lowerCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self :Tuple ) -> int: '''simple docstring''' snake_case_ : str = PLBartTokenizer(lowerCAmelCase__ , language_codes="base" , keep_accents=lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) snake_case_ : int = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) snake_case_ : int = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) snake_case_ : int = tokenizer.vocab_size snake_case_ : List[str] = [tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) for x in range(end - 4 , lowerCAmelCase__ )] self.assertListEqual(lowerCAmelCase__ , ["__java__", "__python__", "__en_XX__", "<mask>"] ) snake_case_ : List[str] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" snake_case_ : List[Any] = tokenizer(lowerCAmelCase__ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) , lowerCAmelCase__ , ) def _A ( self :str ) -> Dict: '''simple docstring''' snake_case_ : Optional[Any] = PLBartTokenizer(lowerCAmelCase__ , language_codes="multi" , keep_accents=lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + 
tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) snake_case_ : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : List[str] = [tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) for x in range(end - 7 , lowerCAmelCase__ )] self.assertListEqual( lowerCAmelCase__ , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] ) snake_case_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" snake_case_ : Optional[Any] = tokenizer(lowerCAmelCase__ ).input_ids self.assertEqual( tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ ) , lowerCAmelCase__ , ) @require_torch @require_sentencepiece @require_tokenizers class A_ (unittest.TestCase ): """simple docstring""" a__ = '''uclanlp/plbart-python-en_XX''' a__ = [ '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''', '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''', ] a__ = [ '''Returns the maximum value of a b c.''', '''Sums the values of a b c.''', ] a__ = [ 134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE, ] @classmethod def _A ( cls :int ) -> List[Any]: '''simple docstring''' snake_case_ : PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" ) snake_case_ : Union[str, Any] = 1 return cls def _A ( self :str ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50_002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50_003 ) def _A ( self :List[Any] ) -> int: '''simple docstring''' snake_case_ : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def _A ( self :List[str] ) -> Optional[int]: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) snake_case_ : List[str] = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2] snake_case_ : str = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , 
lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def _A ( self :Optional[int] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20] self.assertIsInstance(src_text[0] , lowerCAmelCase__ ) snake_case_ : List[Any] = 10 snake_case_ : int = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :str ) -> List[str]: '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50_004, 50_001] ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_ : List[str] = tempfile.mkdtemp() snake_case_ : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase__ ) snake_case_ : Tuple = PLBartTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ ) @require_torch def _A ( self :int ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" ) snake_case_ : Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , lowerCAmelCase__ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def _A ( self :Tuple ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) snake_case_ : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) snake_case_ : Optional[int] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def _A ( self :int ) -> List[Any]: '''simple docstring''' snake_case_ : Tuple = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors="pt" ) snake_case_ : List[str] = self.tokenizer( text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors="pt" ) snake_case_ : int = targets["input_ids"] snake_case_ : List[Any] = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _A ( self :Dict ) -> str: '''simple docstring''' snake_case_ : str = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # A, test, EOS, 
en_XX "input_ids": [[150, 242, 2, 50_003]], "attention_mask": [[1, 1, 1, 1]], # java "forced_bos_token_id": 50_001, } , )
'''simple docstring''' import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser __lowerCamelCase : List[str] = re.compile(R'''\s+''') def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]: """simple docstring""" return {"hash": hashlib.mda(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()} def __UpperCAmelCase ( __magic_name__ )-> str: """simple docstring""" snake_case_ : Optional[Any] = [len(__magic_name__ ) for line in example["content"].splitlines()] return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )} def __UpperCAmelCase ( __magic_name__ )-> int: """simple docstring""" snake_case_ : Optional[int] = np.mean([c.isalnum() for c in example["content"]] ) return {"alpha_frac": alpha_frac} def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple: """simple docstring""" if example["hash"] in uniques: uniques.remove(example["hash"] ) return True else: return False def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> Tuple: """simple docstring""" snake_case_ : List[str] = ["auto-generated", "autogenerated", "automatically generated"] snake_case_ : Optional[Any] = example["content"].splitlines() for _, line in zip(range(__magic_name__ ) ,__magic_name__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> Optional[Any]: """simple docstring""" snake_case_ : str = ["unit tests", "test file", "configuration file"] snake_case_ : int = example["content"].splitlines() snake_case_ : Optional[Any] = 0 snake_case_ : Any = 0 # first test for _, line in zip(range(__magic_name__ ) ,__magic_name__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test snake_case_ : Tuple = example["content"].count("\n" ) snake_case_ : int = int(coeff * nlines ) for line in lines: count_config += line.lower().count("config" ) count_test += line.lower().count("test" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def __UpperCAmelCase ( __magic_name__ )-> str: """simple docstring""" snake_case_ : List[Any] = ["def ", "class ", "for ", "while "] snake_case_ : Optional[Any] = example["content"].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]: """simple docstring""" snake_case_ : Tuple = example["content"].splitlines() snake_case_ : Tuple = 0 for line in lines: counter += line.lower().count("=" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def __UpperCAmelCase ( __magic_name__ )-> List[Any]: """simple docstring""" snake_case_ : Tuple = tokenizer(example["content"] ,truncation=__magic_name__ )["input_ids"] snake_case_ : int = len(example["content"] ) / len(__magic_name__ ) return {"ratio": ratio} def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]: """simple docstring""" snake_case_ : Union[str, Any] = {} results.update(get_hash(__magic_name__ ) ) 
results.update(line_stats(__magic_name__ ) ) results.update(alpha_stats(__magic_name__ ) ) results.update(char_token_ratio(__magic_name__ ) ) results.update(is_autogenerated(__magic_name__ ) ) results.update(is_config_or_test(__magic_name__ ) ) results.update(has_no_keywords(__magic_name__ ) ) results.update(has_few_assignments(__magic_name__ ) ) return results def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple: """simple docstring""" if not check_uniques(__magic_name__ ,__magic_name__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def __UpperCAmelCase ( __magic_name__ )-> Dict: """simple docstring""" with open(__magic_name__ ,"rb" ) as f_in: with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out: shutil.copyfileobj(__magic_name__ ,__magic_name__ ) os.unlink(__magic_name__ ) # Settings __lowerCamelCase : List[Any] = HfArgumentParser(PreprocessingArguments) __lowerCamelCase : str = parser.parse_args() if args.num_workers is None: __lowerCamelCase : List[Any] = multiprocessing.cpu_count() __lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset __lowerCamelCase : Any = time.time() __lowerCamelCase : str = load_dataset(args.dataset_name, split='''train''') print(f'''Time to load dataset: {time.time()-t_start:.2f}''') # Run preprocessing __lowerCamelCase : List[str] = time.time() __lowerCamelCase : Any = ds.map(preprocess, num_proc=args.num_workers) print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''') # Deduplicate hashes __lowerCamelCase : Any = set(ds.unique('''hash''')) __lowerCamelCase : Optional[int] = len(uniques) / len(ds) print(f'''Fraction of duplicates: {1-frac:.2%}''') # Deduplicate data and apply heuristics __lowerCamelCase : List[str] = time.time() __lowerCamelCase : Tuple = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(f'''Time to filter dataset: {time.time()-t_start:.2f}''') print(f'''Size of filtered dataset: {len(ds_filter)}''') # Deduplicate with minhash and jaccard similarity if args.near_deduplication: __lowerCamelCase : List[str] = time.time() __lowerCamelCase , __lowerCamelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''') print(f'''Size of deduplicate dataset: {len(ds_filter)}''') # Save data in batches of samples_per_file __lowerCamelCase : List[Any] = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) __lowerCamelCase : List[str] = output_dir / '''data''' data_dir.mkdir(exist_ok=True) __lowerCamelCase : int = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): __lowerCamelCase : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''') __lowerCamelCase : List[Any] = 
min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
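# Added standalone sketch (ours, not from the original script) of two of the
# cheap filtering heuristics above, so they can be sanity-checked without
# loading a dataset: the alphanumeric fraction and per-line length statistics.
import numpy as np


def content_stats(content: str) -> dict:
    line_lengths = [len(line) for line in content.splitlines()]
    return {
        "alpha_frac": float(np.mean([c.isalnum() for c in content])),
        "line_mean": float(np.mean(line_lengths)),
        "line_max": max(line_lengths),
    }


print(content_stats("def f(x):\n    return x + 1\n"))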
'''simple docstring''' import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( "compression_format, is_archive" ,[ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ] ,) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,)-> Optional[int]: """simple docstring""" snake_case_ : List[str] = { "7z": (seven_zip_file, SevenZipExtractor), "bz2": (bza_file, BzipaExtractor), "gzip": (gz_file, GzipExtractor), "lz4": (lza_file, LzaExtractor), "tar": (tar_file, TarExtractor), "xz": (xz_file, XzExtractor), "zip": (zip_file, ZipExtractor), "zstd": (zstd_file, ZstdExtractor), } snake_case_, snake_case_ : str = input_paths_and_base_extractors[compression_format] if input_path is None: snake_case_ : Tuple = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__magic_name__ ) assert base_extractor.is_extractable(__magic_name__ ) snake_case_ : List[Any] = tmp_path / ("extracted" if is_archive else "extracted.txt") base_extractor.extract(__magic_name__ ,__magic_name__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name snake_case_ : Optional[int] = file_path.read_text(encoding="utf-8" ) else: snake_case_ : Any = output_path.read_text(encoding="utf-8" ) snake_case_ : Optional[Any] = text_file.read_text(encoding="utf-8" ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( "compression_format, is_archive" ,[ ("7z", True), ("bz2", False), ("gzip", False), ("lz4", False), ("tar", True), ("xz", False), ("zip", True), ("zstd", False), ] ,) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,)-> Union[str, Any]: """simple docstring""" snake_case_ : Tuple = { "7z": seven_zip_file, "bz2": bza_file, "gzip": gz_file, "lz4": lza_file, "tar": tar_file, "xz": xz_file, "zip": zip_file, "zstd": zstd_file, } snake_case_ : Optional[Any] = input_paths[compression_format] if input_path is None: snake_case_ : Dict = F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__magic_name__ ) snake_case_ : str = Extractor.infer_extractor_format(__magic_name__ ) assert extractor_format is not None snake_case_ : Union[str, Any] = tmp_path / ("extracted" if is_archive else "extracted.txt") Extractor.extract(__magic_name__ ,__magic_name__ ,__magic_name__ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name snake_case_ : Optional[int] = 
file_path.read_text(encoding="utf-8" ) else: snake_case_ : int = output_path.read_text(encoding="utf-8" ) snake_case_ : List[str] = text_file.read_text(encoding="utf-8" ) assert extracted_file_content == expected_file_content @pytest.fixture def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" import tarfile snake_case_ : List[str] = tmp_path / "data_dot_dot" directory.mkdir() snake_case_ : int = directory / "tar_file_with_dot_dot.tar" with tarfile.TarFile(__magic_name__ ,"w" ) as f: f.add(__magic_name__ ,arcname=os.path.join(".." ,text_file.name ) ) return path @pytest.fixture def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]: """simple docstring""" import tarfile snake_case_ : Optional[int] = tmp_path / "data_sym_link" directory.mkdir() snake_case_ : Union[str, Any] = directory / "tar_file_with_sym_link.tar" os.symlink(".." ,directory / "subdir" ,target_is_directory=__magic_name__ ) with tarfile.TarFile(__magic_name__ ,"w" ) as f: f.add(str(directory / "subdir" ) ,arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( "insecure_tar_file, error_log" ,[("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] ,) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict: """simple docstring""" snake_case_ : str = { "tar_file_with_dot_dot": tar_file_with_dot_dot, "tar_file_with_sym_link": tar_file_with_sym_link, } snake_case_ : str = insecure_tar_files[insecure_tar_file] snake_case_ : List[Any] = tmp_path / "extracted" TarExtractor.extract(__magic_name__ ,__magic_name__ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def __UpperCAmelCase ( __magic_name__ )-> str: """simple docstring""" snake_case_ : Union[str, Any] = tmpdir / "not_a_zip_file" # From: https://github.com/python/cpython/pull/5053 snake_case_ : Union[str, Any] = ( B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00" B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I" B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07" B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82" ) with not_a_zip_file.open("wb" ) as f: f.write(__magic_name__ ) assert zipfile.is_zipfile(str(__magic_name__ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(__magic_name__ ) # but we're right
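# Added sketch (ours) of the false positive the last test documents:
# zipfile.is_zipfile only scans for an end-of-central-directory record, so a
# non-zip byte stream that happens to end with a well-formed one is accepted.
import io
import zipfile

fake = b"\x89PNG\r\n\x1a\n" + b"\x00" * 8 + b"PK\x05\x06" + b"\x00" * 18
print(zipfile.is_zipfile(io.BytesIO(fake)))  # True, despite not being a real archive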
'''simple docstring'''
import pickle
import unittest

import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class A_(unittest.TestCase):
    """simple docstring"""

    def test_accelerated_optimizer_pickling(self) -> None:
        '''simple docstring'''
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
'''simple docstring'''
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path) -> bool:
    """Checks that every custom extension file ships with the package."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
'''simple docstring''' import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py __lowerCamelCase : Any = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. __lowerCamelCase : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS) __lowerCamelCase : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` __lowerCamelCase : Union[str, Any] = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') __lowerCamelCase : Any = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def __UpperCAmelCase ( __magic_name__ )-> List[Any]: """simple docstring""" snake_case_ : Tuple = None # source code of `config_class` snake_case_ : List[Any] = inspect.getsource(__magic_name__ ) snake_case_ : List[str] = _re_checkpoint.findall(__magic_name__ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/" ): snake_case_ : Optional[Any] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link snake_case_ : str = F'''https://huggingface.co/{ckpt_name}''' if ckpt_link == ckpt_link_from_name: snake_case_ : Dict = ckpt_name break return checkpoint def __UpperCAmelCase ( )-> Dict: """simple docstring""" snake_case_ : Optional[int] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue snake_case_ : str = get_checkpoint_from_config_class(__magic_name__ ) snake_case_ : Union[str, Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(__magic_name__ ) if len(__magic_name__ ) > 0: snake_case_ : Tuple = "\n".join(sorted(__magic_name__ ) ) raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
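# Added sketch (ours, not from the original script): the checkpoint-extraction
# regex above in isolation, showing the (name, link) tuples it yields from a
# config docstring.
import re

_re_ckpt = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "Instantiating with the defaults will yield a configuration similar to [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
print(_re_ckpt.findall(doc))  # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]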
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : Tuple = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class A_ (a_ ): """simple docstring""" a__ = '''xlnet''' a__ = ['''mems'''] a__ = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self :Any , lowerCAmelCase__ :Optional[int]=32_000 , lowerCAmelCase__ :Any=1_024 , lowerCAmelCase__ :Tuple=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[Any]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[int]="bi" , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Any=512 , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :Tuple=-1 , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :Optional[int]="last" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :str="tanh" , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :int=5 , lowerCAmelCase__ :Optional[int]=5 , lowerCAmelCase__ :Any=5 , lowerCAmelCase__ :List[Any]=1 , lowerCAmelCase__ :Dict=2 , **lowerCAmelCase__ :Tuple , ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = vocab_size snake_case_ : int = d_model snake_case_ : List[str] = n_layer snake_case_ : Optional[int] = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) snake_case_ : Any = d_model // n_head snake_case_ : int = ff_activation snake_case_ : str = d_inner snake_case_ : Optional[Any] = untie_r snake_case_ : Tuple = attn_type snake_case_ : Dict = initializer_range snake_case_ : int = layer_norm_eps snake_case_ : Optional[Any] = dropout snake_case_ : Union[str, Any] = mem_len snake_case_ : Optional[int] = reuse_len snake_case_ : Any = bi_data snake_case_ : Any = clamp_len snake_case_ : int = same_length snake_case_ : Any = summary_type snake_case_ : Dict = summary_use_proj snake_case_ : str = summary_activation snake_case_ : Tuple = summary_last_dropout snake_case_ : int = start_n_top snake_case_ : str = end_n_top snake_case_ : List[Any] = bos_token_id snake_case_ : List[str] = pad_token_id snake_case_ : Optional[int] = eos_token_id if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`" " instead." 
, lowerCAmelCase__ , ) snake_case_ : Optional[int] = kwargs["use_cache"] snake_case_ : Optional[int] = use_mems_eval snake_case_ : Optional[Any] = use_mems_train super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def _A ( self :List[str] ) -> Any: '''simple docstring''' logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def _A ( self :int , lowerCAmelCase__ :Dict ) -> Union[str, Any]: '''simple docstring''' raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
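# Added usage sketch (ours): instantiating the config above offline and
# checking the derived head size, which is why d_model must divide evenly by
# n_head.
from transformers import XLNetConfig

config = XLNetConfig(d_model=1024, n_head=16)
print(config.d_head)  # 64, i.e. d_model // n_head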
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : List[str] = logging.get_logger(__name__) __lowerCamelCase : int = { '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class A_ (a_ ): """simple docstring""" a__ = '''cvt''' def __init__( self :List[Any] , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Any=[7, 3, 3] , lowerCAmelCase__ :Dict=[4, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[2, 1, 1] , lowerCAmelCase__ :Any=[64, 192, 384] , lowerCAmelCase__ :List[str]=[1, 3, 6] , lowerCAmelCase__ :str=[1, 2, 10] , lowerCAmelCase__ :Any=[4.0, 4.0, 4.0] , lowerCAmelCase__ :int=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Optional[Any]=[0.0, 0.0, 0.0] , lowerCAmelCase__ :Dict=[0.0, 0.0, 0.1] , lowerCAmelCase__ :List[Any]=[True, True, True] , lowerCAmelCase__ :List[Any]=[False, False, True] , lowerCAmelCase__ :Dict=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase__ :Any=[3, 3, 3] , lowerCAmelCase__ :Tuple=[1, 1, 1] , lowerCAmelCase__ :Optional[int]=[2, 2, 2] , lowerCAmelCase__ :Union[str, Any]=[1, 1, 1] , lowerCAmelCase__ :Any=[1, 1, 1] , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Dict=1E-1_2 , **lowerCAmelCase__ :Optional[Any] , ) -> str: '''simple docstring''' super().__init__(**lowerCAmelCase__ ) snake_case_ : int = num_channels snake_case_ : int = patch_sizes snake_case_ : Optional[Any] = patch_stride snake_case_ : Dict = patch_padding snake_case_ : Tuple = embed_dim snake_case_ : Optional[int] = num_heads snake_case_ : Union[str, Any] = depth snake_case_ : Optional[int] = mlp_ratio snake_case_ : Tuple = attention_drop_rate snake_case_ : str = drop_rate snake_case_ : Tuple = drop_path_rate snake_case_ : Any = qkv_bias snake_case_ : Union[str, Any] = cls_token snake_case_ : int = qkv_projection_method snake_case_ : Any = kernel_qkv snake_case_ : Union[str, Any] = padding_kv snake_case_ : str = stride_kv snake_case_ : Dict = padding_q snake_case_ : Tuple = stride_q snake_case_ : Any = initializer_range snake_case_ : Any = layer_norm_eps
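# Added usage sketch (ours): the config above describes a staged model, so
# the per-stage lists (depths, heads, widths) line up index by index.
from transformers import CvtConfig

config = CvtConfig()
for stage, (depth, heads, dim) in enumerate(zip(config.depth, config.num_heads, config.embed_dim)):
    print(f"stage {stage}: depth={depth}, heads={heads}, embed_dim={dim}")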
'''simple docstring'''
import math


def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
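# Added sketch (ours): the same Project Euler answer in closed form, using
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6, which avoids the O(n) loop.
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


assert solution_closed_form(10) == 2640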
'''simple docstring''' import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets __lowerCamelCase : str = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' __lowerCamelCase : Dict = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. ''' __lowerCamelCase : int = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): """simple docstring""" def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def _A ( self :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[str] = len(references[0] ) if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) snake_case_ : List[str] = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )] snake_case_ : List[str] = TER( normalized=lowerCAmelCase__ , no_punct=lowerCAmelCase__ , asian_support=lowerCAmelCase__ , case_sensitive=lowerCAmelCase__ , ) snake_case_ : Any = sb_ter.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
656
1
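The TER metric row above delegates the actual scoring to sacrebleu. As a rough sketch of what its `compute` path does under the hood — assuming sacrebleu 2.x, where `TER` lives in `sacrebleu.metrics`; the hypothesis/reference strings are the ones from the docstring examples — this should reproduce the numbers of Example 2:

```python
# Minimal sketch of the TER computation the metric wraps (assumes sacrebleu>=2.0).
from sacrebleu.metrics import TER

predictions = ["does this sentence match??", "what about this sentence?"]
references = [
    ["does this sentence match", "does this sentence match!?!"],
    ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
]

# sacrebleu expects references transposed into one list per reference "stream",
# which is exactly what the list comprehension in the row above builds.
refs_per_pred = len(references[0])
transposed_refs = [[refs[i] for refs in references] for i in range(refs_per_pred)]

ter = TER(normalized=False, no_punct=False, asian_support=False, case_sensitive=True)
output = ter.corpus_score(predictions, transposed_refs)
print({"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length})
```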
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MgpstrTokenizer a__ = False a__ = {} a__ = False def _A ( self :List[str] ) -> List[str]: '''simple docstring''' super().setUp() # fmt: off snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str: '''simple docstring''' snake_case_ : Dict = "tester" snake_case_ : Tuple = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def _A ( self :Dict ) -> str: '''simple docstring''' pass def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : Tuple = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 1 ) snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) self.assertTrue(special_token not in decoded ) def _A ( self :int ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertNotEqual(len(lowerCAmelCase__ ) , 0 ) snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def _A ( self :Union[str, Any] ) -> Any: '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def _A ( self :int ) -> Dict: '''simple docstring''' pass
656
'''simple docstring''' from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def __UpperCAmelCase ( )-> int: """simple docstring""" snake_case_ : Any = { "repo_name": ["test_repo1", "test_repo2", "test_repo3"], "path": ["test_1.py", "test_2.py", "unit_test.py"], "content": ["a " * 20, "a " * 30, "b " * 7], } snake_case_ : int = Dataset.from_dict(__magic_name__ ) return dataset class A_ (a_ ): """simple docstring""" def _A ( self :List[str] ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = get_dataset() snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' snake_case_ : Optional[int] = get_dataset() snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 2 ) print(lowerCAmelCase__ ) self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 ) self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
656
1
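The MGP-STR test row above exercises a character-level vocabulary written to `vocab.json` and then round-tripped through encode/decode. A minimal, framework-free sketch of that same round trip over such a vocab (the helper names here are illustrative, not part of the library):

```python
# Toy character-level encode/decode, mirroring what the MgpstrTokenizer test checks.
vocab = ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz")
token_to_id = {tok: i for i, tok in enumerate(vocab)}
id_to_token = {i: tok for tok, i in token_to_id.items()}

def encode(text: str) -> list[int]:
    # Lower-case first, since the vocab only contains lower-case letters.
    return [token_to_id[ch] for ch in text.lower() if ch in token_to_id]

def decode(ids: list[int]) -> str:
    return "".join(id_to_token[i] for i in ids)

ids = encode("tester")
assert decode(ids) == "tester"
```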
'''simple docstring''' import argparse import json import os from tensorflow.core.protobuf.saved_model_pb2 import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __lowerCamelCase : str = '''.''' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) __lowerCamelCase : Tuple = [ '''Assert''', '''AssignVariableOp''', '''EmptyTensorList''', '''MergeV2Checkpoints''', '''ReadVariableOp''', '''ResourceGather''', '''RestoreV2''', '''SaveV2''', '''ShardedFilename''', '''StatefulPartitionedCall''', '''StaticRegexFullMatch''', '''VarHandleOp''', ] def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]: """simple docstring""" snake_case_ : Tuple = SavedModel() snake_case_ : Dict = [] with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f: snake_case_ : Dict = json.load(__magic_name__ )["opsets"] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(__magic_name__ )] ) with open(__magic_name__ ,"rb" ) as f: saved_model.ParseFromString(f.read() ) snake_case_ : Tuple = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want snake_case_ : str = sorted(__magic_name__ ) snake_case_ : Optional[int] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__magic_name__ ) if strict and len(__magic_name__ ) > 0: raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops) ) elif len(__magic_name__ ) > 0: print(F'''Found the following incompatible ops for the opset {opset}:''' ) print(*__magic_name__ ,sep="\n" ) else: print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": __lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''') parser.add_argument( '''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.''' ) parser.add_argument( '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.''' ) parser.add_argument( '''--strict''', action='''store_true''', help='''Whether to make the checking strict (raise errors) or not (raise warnings)''' ) __lowerCamelCase : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
656
'''simple docstring''' from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_torch_available from ...utils import OptionalDependencyNotAvailable __lowerCamelCase : Dict = { '''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''], '''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ '''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoXJapaneseForCausalLM''', '''GPTNeoXJapaneseLayer''', '''GPTNeoXJapaneseModel''', '''GPTNeoXJapanesePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox_japanese import ( GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseLayer, GPTNeoXJapaneseModel, GPTNeoXJapanesePreTrainedModel, ) else: import sys __lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
656
1
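The op-compatibility script above boils down to a set difference between the ops found in a SavedModel graph and the ops an ONNX opset supports, minus a whitelist of internal bookkeeping ops. A stripped-down sketch of that core check, with made-up op names standing in for real graph contents:

```python
# Core of the compliancy check: ops in the graph that are neither ONNX-exportable
# nor internal bookkeeping ops are flagged as incompatible.
INTERNAL_OPS = {"Assert", "ReadVariableOp", "VarHandleOp"}  # abbreviated whitelist
onnx_ops = {"MatMul", "Relu", "Add"}                        # would come from onnx.json

model_op_names = {"MatMul", "Relu", "FancyCustomOp", "ReadVariableOp"}

incompatible = sorted(model_op_names - onnx_ops - INTERNAL_OPS)
if incompatible:
    raise Exception("Found incompatible ops:\n" + "\n".join(incompatible))
```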
'''simple docstring''' import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCamelCase : Any = logging.get_logger(__name__) __lowerCamelCase : int = { '''microsoft/conditional-detr-resnet-50''': ( '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json''' ), } class A_ (a_ ): """simple docstring""" a__ = '''conditional_detr''' a__ = ['''past_key_values'''] a__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self :List[Any] , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :Dict=300 , lowerCAmelCase__ :List[Any]=6 , lowerCAmelCase__ :Any=2_048 , lowerCAmelCase__ :Any=8 , lowerCAmelCase__ :Union[str, Any]=6 , lowerCAmelCase__ :Tuple=2_048 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Dict=0.0 , lowerCAmelCase__ :Any=0.0 , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[Any]="relu" , lowerCAmelCase__ :Tuple=256 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :int=0.0 , lowerCAmelCase__ :Optional[Any]=0.0 , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :Dict=1.0 , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]="sine" , lowerCAmelCase__ :Any="resnet50" , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :Tuple=2 , lowerCAmelCase__ :List[Any]=5 , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :Optional[Any]=1 , lowerCAmelCase__ :List[str]=1 , lowerCAmelCase__ :Union[str, Any]=2 , lowerCAmelCase__ :Tuple=5 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :List[str]=0.2_5 , **lowerCAmelCase__ :Tuple , ) -> Any: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) snake_case_ : Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : Optional[Any] = backbone_config.get("model_type" ) snake_case_ : Tuple = CONFIG_MAPPING[backbone_model_type] snake_case_ : List[str] = config_class.from_dict(lowerCAmelCase__ ) snake_case_ : int = use_timm_backbone snake_case_ : Union[str, Any] = backbone_config snake_case_ : int = num_channels snake_case_ : int = num_queries snake_case_ : List[Any] = d_model snake_case_ : int = encoder_ffn_dim snake_case_ : str = encoder_layers snake_case_ : str = encoder_attention_heads snake_case_ : List[Any] = decoder_ffn_dim snake_case_ : Union[str, Any] = decoder_layers snake_case_ : List[Any] = decoder_attention_heads snake_case_ : List[Any] = dropout snake_case_ : int = attention_dropout snake_case_ : str = activation_dropout snake_case_ : int = activation_function snake_case_ : str = init_std snake_case_ : Union[str, Any] = init_xavier_std snake_case_ : Optional[Any] = encoder_layerdrop snake_case_ : str = decoder_layerdrop snake_case_ : Tuple = encoder_layers snake_case_ : Dict = auxiliary_loss snake_case_ : str = position_embedding_type snake_case_ : Dict = backbone snake_case_ : Union[str, Any] = use_pretrained_backbone snake_case_ : Tuple = dilation # Hungarian matcher snake_case_ : int = class_cost snake_case_ : Optional[int] = bbox_cost snake_case_ : int = giou_cost # Loss coefficients snake_case_ : Optional[int] = mask_loss_coefficient snake_case_ : Dict = dice_loss_coefficient snake_case_ : Optional[int] = cls_loss_coefficient snake_case_ : Any = bbox_loss_coefficient snake_case_ : List[Any] = giou_loss_coefficient snake_case_ : List[str] = focal_alpha super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def _A ( self :Dict ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def _A ( self :Optional[int] ) -> int: '''simple docstring''' return self.d_model def _A ( self :Optional[int] ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: snake_case_ : Union[str, Any] = self.backbone_config.to_dict() snake_case_ : str = self.__class__.model_type return output class A_ (a_ ): """simple docstring""" a__ = version.parse('''1.11''' ) @property def _A ( self :Any ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _A ( self :Dict ) -> float: '''simple docstring''' return 1E-5 @property def _A ( self :Union[str, Any] ) -> int: '''simple docstring''' return 12
656
'''simple docstring'''


def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2 * n - 1)."""
    # Validate the type before comparing, so a non-int input raises a clean error.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
656
1
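The helper in the row above enumerates hexagonal numbers via the closed form h(n) = n(2n - 1). A quick arithmetic sanity check of that formula:

```python
# h(n) = n * (2n - 1): 0, 1, 6, 15, 28, ...
first_five = [n * (2 * n - 1) for n in range(5)]
assert first_five == [0, 1, 6, 15, 28]

# Equivalently, consecutive differences grow by 4: 1, 5, 9, 13, ...
diffs = [b - a for a, b in zip(first_five, first_five[1:])]
assert diffs == [1, 5, 9, 13]
```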
'''simple docstring''' from __future__ import annotations __lowerCamelCase : Optional[Any] = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } class A_ : """simple docstring""" def __init__( self :List[Any] , lowerCAmelCase__ :dict[str, list[str]] , lowerCAmelCase__ :str ) -> None: '''simple docstring''' snake_case_ : List[str] = graph # mapping node to its parent in resulting breadth first tree snake_case_ : dict[str, str | None] = {} snake_case_ : Dict = source_vertex def _A ( self :Optional[int] ) -> None: '''simple docstring''' snake_case_ : int = {self.source_vertex} snake_case_ : Dict = None snake_case_ : Tuple = [self.source_vertex] # first in first out queue while queue: snake_case_ : Dict = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(lowerCAmelCase__ ) snake_case_ : List[Any] = vertex queue.append(lowerCAmelCase__ ) def _A ( self :Optional[Any] , lowerCAmelCase__ :str ) -> str: '''simple docstring''' if target_vertex == self.source_vertex: return self.source_vertex snake_case_ : Any = self.parent.get(lowerCAmelCase__ ) if target_vertex_parent is None: snake_case_ : Optional[Any] = ( F'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(lowerCAmelCase__ ) return self.shortest_path(lowerCAmelCase__ ) + F'''->{target_vertex}''' if __name__ == "__main__": __lowerCamelCase : Tuple = Graph(graph, '''G''') g.breath_first_search() print(g.shortest_path('''D''')) print(g.shortest_path('''G''')) print(g.shortest_path('''Foo'''))
656
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __UpperCAmelCase ( __magic_name__=None )-> List[str]: """simple docstring""" if subparsers is not None: snake_case_ : List[str] = subparsers.add_parser("test" ) else: snake_case_ : List[Any] = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" ,default=__magic_name__ ,help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) ,) if subparsers is not None: parser.set_defaults(func=__magic_name__ ) return parser def __UpperCAmelCase ( __magic_name__ )-> Tuple: """simple docstring""" snake_case_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: snake_case_ : str = script_name else: snake_case_ : Any = F'''--config_file={args.config_file} {script_name}''' snake_case_ : Union[str, Any] = ["accelerate-launch"] + test_args.split() snake_case_ : Optional[int] = execute_subprocess_async(__magic_name__ ,env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! You are ready for your distributed training!" ) def __UpperCAmelCase ( )-> int: """simple docstring""" snake_case_ : Dict = test_command_parser() snake_case_ : Dict = parser.parse_args() test_command(__magic_name__ ) if __name__ == "__main__": main()
656
1
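The breadth-first-search class above records each vertex's parent during traversal and then walks parents backwards to reconstruct a shortest path. The same idea in a compact standalone form, reusing the graph literal from the row (a `deque` replaces the O(n) `list.pop(0)`):

```python
from collections import deque

graph = {"A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"],
         "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"]}

def shortest_path(source: str, target: str) -> list[str]:
    parent = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()          # deque gives O(1) pops from the front
        for neighbor in graph[vertex]:
            if neighbor not in parent:
                parent[neighbor] = vertex
                queue.append(neighbor)
    if target not in parent:
        raise ValueError(f"No path from {source} to {target}")
    path = []
    while target is not None:             # walk parents back to the source
        path.append(target)
        target = parent[target]
    return path[::-1]

assert shortest_path("G", "D") == ["G", "C", "A", "B", "D"]
```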
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class A_ (a_ ): """simple docstring""" def __init__( self :Optional[Any] ) -> List[Any]: '''simple docstring''' self.test() def _A ( self :Union[str, Any] ) -> Optional[Any]: '''simple docstring''' snake_case_ : Tuple = 0 snake_case_ : int = False while not completed: if counter == 1: self.reset() snake_case_ : Union[str, Any] = self.advance() if not self.does_advance(lowerCAmelCase__ ): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) snake_case_, snake_case_, snake_case_ : List[Any] = self.update(lowerCAmelCase__ ) counter += 1 if counter > 10_000: raise Exception("update() does not fulfill the constraint." ) if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly." ) @abstractmethod def _A ( self :Dict ) -> Optional[Any]: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _A ( self :Optional[Any] , lowerCAmelCase__ :int ) -> Any: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _A ( self :Dict , lowerCAmelCase__ :int ) -> Tuple: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _A ( self :Any ) -> List[str]: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _A ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def _A ( self :str , lowerCAmelCase__ :List[str]=False ) -> int: '''simple docstring''' raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class A_ (a_ ): """simple docstring""" def __init__( self :Any , lowerCAmelCase__ :List[int] ) -> List[str]: '''simple docstring''' super(lowerCAmelCase__ , self ).__init__() if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or len(lowerCAmelCase__ ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) snake_case_ : Any = token_ids snake_case_ : Any = len(self.token_ids ) snake_case_ : Tuple = -1 # the index of the currently fulfilled step snake_case_ : Optional[int] = False def _A ( self :List[str] ) -> Any: '''simple docstring''' if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def _A ( self :str , lowerCAmelCase__ :int ) -> Union[str, Any]: '''simple docstring''' if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(lowerCAmelCase__ )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def _A ( self :List[str] , lowerCAmelCase__ :int ) -> str: '''simple docstring''' if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(lowerCAmelCase__ )}''' ) snake_case_ : Any = False snake_case_ : List[Any] = False snake_case_ : Any = False if self.does_advance(lowerCAmelCase__ ): self.fulfilled_idx += 1 snake_case_ : Union[str, Any] = True if self.fulfilled_idx == (self.seqlen - 1): snake_case_ : Dict = True snake_case_ : List[str] = completed else: # failed to make progress. 
snake_case_ : List[Any] = True self.reset() return stepped, completed, reset def _A ( self :str ) -> Optional[Any]: '''simple docstring''' snake_case_ : List[Any] = False snake_case_ : Dict = 0 def _A ( self :int ) -> Tuple: '''simple docstring''' return self.seqlen - (self.fulfilled_idx + 1) def _A ( self :int , lowerCAmelCase__ :Union[str, Any]=False ) -> str: '''simple docstring''' snake_case_ : List[str] = PhrasalConstraint(self.token_ids ) if stateful: snake_case_ : Dict = self.seqlen snake_case_ : List[Any] = self.fulfilled_idx snake_case_ : Dict = self.completed return new_constraint class A_ : """simple docstring""" def __init__( self :int , lowerCAmelCase__ :List[List[int]] , lowerCAmelCase__ :Any=True ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Dict = max([len(lowerCAmelCase__ ) for one in nested_token_ids] ) snake_case_ : List[str] = {} for token_ids in nested_token_ids: snake_case_ : Union[str, Any] = root for tidx, token_id in enumerate(lowerCAmelCase__ ): if token_id not in level: snake_case_ : int = {} snake_case_ : Optional[Any] = level[token_id] if no_subsets and self.has_subsets(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" F''' {nested_token_ids}.''' ) snake_case_ : Union[str, Any] = root def _A ( self :int , lowerCAmelCase__ :Optional[int] ) -> Tuple: '''simple docstring''' snake_case_ : Any = self.trie for current_token in current_seq: snake_case_ : str = start[current_token] snake_case_ : List[Any] = list(start.keys() ) return next_tokens def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[int] = self.next_tokens(lowerCAmelCase__ ) return len(lowerCAmelCase__ ) == 0 def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] ) -> int: '''simple docstring''' snake_case_ : Optional[int] = list(root.values() ) if len(lowerCAmelCase__ ) == 0: return 1 else: return sum([self.count_leaves(lowerCAmelCase__ ) for nn in next_nodes] ) def _A ( self :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Any ) -> str: '''simple docstring''' snake_case_ : str = self.count_leaves(lowerCAmelCase__ ) return len(lowerCAmelCase__ ) != leaf_count class A_ (a_ ): """simple docstring""" def __init__( self :Any , lowerCAmelCase__ :List[List[int]] ) -> List[Any]: '''simple docstring''' super(lowerCAmelCase__ , self ).__init__() if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or len(lowerCAmelCase__ ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) snake_case_ : Any = DisjunctiveTrie(lowerCAmelCase__ ) snake_case_ : Optional[int] = nested_token_ids snake_case_ : List[str] = self.trie.max_height snake_case_ : Tuple = [] snake_case_ : int = False def _A ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = self.trie.next_tokens(self.current_seq ) if len(lowerCAmelCase__ ) == 0: return None else: return token_list def _A ( self :Optional[Any] , lowerCAmelCase__ :int ) 
-> Dict: '''simple docstring''' if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowerCAmelCase__ )}''' ) snake_case_ : List[Any] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def _A ( self :Any , lowerCAmelCase__ :int ) -> Any: '''simple docstring''' if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(lowerCAmelCase__ )}''' ) snake_case_ : Optional[int] = False snake_case_ : Union[str, Any] = False snake_case_ : Tuple = False if self.does_advance(lowerCAmelCase__ ): self.current_seq.append(lowerCAmelCase__ ) snake_case_ : Dict = True else: snake_case_ : Optional[Any] = True self.reset() snake_case_ : Optional[int] = self.trie.reached_leaf(self.current_seq ) snake_case_ : Tuple = completed return stepped, completed, reset def _A ( self :List[str] ) -> Any: '''simple docstring''' snake_case_ : str = False snake_case_ : List[str] = [] def _A ( self :Any ) -> Any: '''simple docstring''' if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def _A ( self :List[Any] , lowerCAmelCase__ :Optional[int]=False ) -> Any: '''simple docstring''' snake_case_ : str = DisjunctiveConstraint(self.token_ids ) if stateful: snake_case_ : int = self.seqlen snake_case_ : Tuple = self.current_seq snake_case_ : Optional[Any] = self.completed return new_constraint class A_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCAmelCase__ :List[Constraint] ) -> Tuple: '''simple docstring''' snake_case_ : Optional[int] = constraints # max # of steps required to fulfill a given constraint snake_case_ : Dict = max([c.seqlen for c in constraints] ) snake_case_ : Optional[Any] = len(lowerCAmelCase__ ) snake_case_ : Optional[Any] = False self.init_state() def _A ( self :str ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = [] snake_case_ : Optional[int] = None snake_case_ : Any = [constraint.copy(stateful=lowerCAmelCase__ ) for constraint in self.constraints] def _A ( self :int ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def _A ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" snake_case_ : Optional[int] = constraint.advance() if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): token_list.append(lowerCAmelCase__ ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): token_list.extend(lowerCAmelCase__ ) else: snake_case_ : Union[str, Any] = self.inprogress_constraint.advance() if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): token_list.append(lowerCAmelCase__ ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): token_list.extend(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) == 0: return None else: return token_list def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[List[int]] ) -> List[str]: '''simple docstring''' self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint snake_case_, snake_case_ : Tuple = self.add(lowerCAmelCase__ ) # the entire 
list of constraints are fulfilled if self.completed: break def _A ( self :int , lowerCAmelCase__ :int ) -> Dict: '''simple docstring''' if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) snake_case_, snake_case_ : List[str] = False, False if self.completed: snake_case_ : Any = True snake_case_ : Optional[Any] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state snake_case_, snake_case_, snake_case_ : Any = self.inprogress_constraint.update(lowerCAmelCase__ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=lowerCAmelCase__ ) ) snake_case_ : List[Any] = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) snake_case_ : int = None if len(self.pending_constraints ) == 0: # we're done! snake_case_ : List[str] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(lowerCAmelCase__ ): snake_case_, snake_case_, snake_case_ : Tuple = pending_constraint.update(lowerCAmelCase__ ) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = None if not complete and stepped: snake_case_ : Optional[Any] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". snake_case_ : Union[str, Any] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. snake_case_ : Union[str, Any] = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def _A ( self :Union[str, Any] , lowerCAmelCase__ :Dict=True ) -> int: '''simple docstring''' snake_case_ : int = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: snake_case_ : Dict = [ constraint.copy(stateful=lowerCAmelCase__ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: snake_case_ : Tuple = self.inprogress_constraint.copy(stateful=lowerCAmelCase__ ) snake_case_ : List[str] = [constraint.copy() for constraint in self.pending_constraints] return new_state
656
'''simple docstring''' from scipy.stats import spearmanr import datasets __lowerCamelCase : str = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' __lowerCamelCase : int = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' __lowerCamelCase : List[str] = R'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): """simple docstring""" def _A ( self :str ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
656
1
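The row above contains the constrained-decoding machinery (with the spearmanr metric as its style context). The heart of `PhrasalConstraint` is a small cursor over a forced token sequence: `advance()` proposes the next required token, `update()` consumes a generated one and resets on mismatch. A simplified sketch of that state machine, with validation stripped out:

```python
# Toy version of the PhrasalConstraint state machine from the row above.
class TinyPhrasalConstraint:
    def __init__(self, token_ids: list[int]):
        self.token_ids = token_ids
        self.fulfilled_idx = -1
        self.completed = False

    def advance(self):
        return None if self.completed else self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        stepped = token_id == self.advance()
        if stepped:
            self.fulfilled_idx += 1
            self.completed = self.fulfilled_idx == len(self.token_ids) - 1
        else:
            self.fulfilled_idx, self.completed = -1, False  # reset on mismatch
        return stepped, self.completed

c = TinyPhrasalConstraint([5, 6, 7])
assert c.update(5) == (True, False)
assert c.update(9) == (False, False)                 # progress resets
assert [c.update(t) for t in (5, 6, 7)][-1] == (True, True)
```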
'''simple docstring''' import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging __lowerCamelCase : Any = logging.get_logger(__name__) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> None: """simple docstring""" snake_case_ : Tuple = nn.ModuleList([src_layers[i] for i in layers_to_copy] ) assert len(__magic_name__ ) == len(__magic_name__ ), F'''{len(__magic_name__ )} != {len(__magic_name__ )}''' dest_layers.load_state_dict(layers_to_copy.state_dict() ) __lowerCamelCase : Dict = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } __lowerCamelCase : str = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[Any]: """simple docstring""" try: snake_case_ : int = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first''' F''' {n_student}''' ) return list(range(__magic_name__ ) ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[int]: """simple docstring""" if n_student > n_teacher: raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' ) elif n_teacher == n_student: return list(range(__magic_name__ ) ) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = "student" ,__magic_name__ = None ,__magic_name__ = None ,__magic_name__=False ,__magic_name__=None ,__magic_name__=None ,**__magic_name__ ,)-> Tuple[PreTrainedModel, List[int], List[int]]: """simple docstring""" snake_case_ : Tuple = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher." 
assert (e is not None) or (d is not None), _msg if isinstance(__magic_name__ ,__magic_name__ ): AutoTokenizer.from_pretrained(__magic_name__ ).save_pretrained(__magic_name__ ) # purely for convenience snake_case_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(__magic_name__ ).eval() else: assert isinstance(__magic_name__ ,__magic_name__ ), F'''teacher must be a model or string got type {type(__magic_name__ )}''' snake_case_ : Any = teacher.config.to_diff_dict() try: snake_case_, snake_case_ : Union[str, Any] = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: snake_case_ : str = teacher_e if d is None: snake_case_ : str = teacher_d init_kwargs.update({"encoder_layers": e, "decoder_layers": d} ) except AttributeError: # T5 if hasattr(teacher.config ,"num_encoder_layers" ): snake_case_, snake_case_ : Optional[int] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: snake_case_, snake_case_ : List[Any] = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: snake_case_ : Tuple = teacher_e if d is None: snake_case_ : Optional[Any] = teacher_d if hasattr(teacher.config ,"num_encoder_layers" ): init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} ) else: init_kwargs.update({"num_layers": e, "num_decoder_layers": d} ) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(__magic_name__ ) # Copy weights snake_case_ : Optional[Any] = teacher.config_class(**__magic_name__ ) snake_case_ : str = AutoModelForSeqaSeqLM.from_config(__magic_name__ ) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. snake_case_ : str = student.load_state_dict(teacher.state_dict() ,strict=__magic_name__ ) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save snake_case_, snake_case_ : Optional[int] = list(range(__magic_name__ ) ), list(range(__magic_name__ ) ) logger.info( F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to''' F''' {save_path}''' ) student.save_pretrained(__magic_name__ ) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: snake_case_ : List[int] = pick_layers_to_copy(__magic_name__ ,__magic_name__ ) if d_layers_to_copy is None: snake_case_ : List[int] = pick_layers_to_copy(__magic_name__ ,__magic_name__ ) try: if hasattr( __magic_name__ ,"prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers ,student.prophetnet.encoder.layers ,__magic_name__ ) copy_layers(teacher.prophetnet.decoder.layers ,student.prophetnet.decoder.layers ,__magic_name__ ) else: copy_layers(teacher.model.encoder.layers ,student.model.encoder.layers ,__magic_name__ ) copy_layers(teacher.model.decoder.layers ,student.model.decoder.layers ,__magic_name__ ) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block ,student.encoder.block ,__magic_name__ ) copy_layers(teacher.decoder.block ,student.decoder.block ,__magic_name__ ) logger.info( F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. 
Saving them to {save_path}''' ) snake_case_ : Optional[Any] = { "teacher_type": teacher.config.model_type, "copied_encoder_layers": e_layers_to_copy, "copied_decoder_layers": d_layers_to_copy, } student.save_pretrained(__magic_name__ ) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
656
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __lowerCamelCase : str = 128022 __lowerCamelCase : List[Any] = 128028 @require_sentencepiece class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MaMaaaTokenizer a__ = False a__ = False a__ = True def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' super().setUp() snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Optional[int] = Path(self.tmpdirname ) save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str: '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]: '''simple docstring''' return ( "This is a test", "This is a test", ) def _A ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = "</s>" snake_case_ : Union[str, Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : Any = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' pass def _A ( self :Optional[int] ) -> int: '''simple docstring''' snake_case_ : int = self.get_tokenizer() snake_case_ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , ) snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , "This is a test" ) @slow def _A ( self :Any ) -> List[Any]: '''simple docstring''' snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch 
@require_sentencepiece @require_tokenizers class A_ (unittest.TestCase ): """simple docstring""" a__ = '''facebook/m2m100_418M''' a__ = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] a__ = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def _A ( cls :str ) -> int: '''simple docstring''' snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) snake_case_ : List[str] = 1 return cls def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 ) def _A ( self :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.tokenizer.get_vocab() self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ ) def _A ( self :Any ) -> Dict: '''simple docstring''' snake_case_ : List[str] = "en" snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2] # fmt: on snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = tempfile.mkdtemp() snake_case_ : int = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(lowerCAmelCase__ ) snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ ) @require_torch def _A ( self :Optional[Any] ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = "en" snake_case_ : Tuple = "fr" snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" ) snake_case_ : Dict = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: snake_case_ : str = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : 
List[str] = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) snake_case_ : int = "zh" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def _A ( self :str ) -> int: '''simple docstring''' snake_case_ : Dict = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) snake_case_ : Tuple = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # en_XX, A, test, EOS "input_ids": [[128_022, 58, 4_183, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 128_006, } , )
656
1
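The student-creation script in the row above copies a curated subset of teacher layers, falling back to the first N layers when no hardcoded mapping exists. A sketch of that selection logic with the mapping abbreviated to a fragment of the table from the row:

```python
import warnings

# Abbreviated from the LAYERS_TO_COPY table in the row above.
LAYERS_TO_COPY = {12: {1: [0], 2: [0, 6], 3: [0, 6, 11], 6: [0, 2, 4, 7, 9, 11]}}

def pick_layers_to_copy(n_student: int, n_teacher: int) -> list[int]:
    try:
        return LAYERS_TO_COPY[n_teacher][n_student]
    except KeyError:
        # No curated mapping: default to copying the first n_student layers.
        warnings.warn(f"no hardcoded layers for teacher {n_teacher} -> student {n_student}, "
                      f"defaulting to first {n_student}")
        return list(range(n_student))

assert pick_layers_to_copy(3, 12) == [0, 6, 11]        # keeps first, middle, last
assert pick_layers_to_copy(5, 12) == [0, 1, 2, 3, 4]   # fallback path
```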
'''simple docstring'''
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
656
'''simple docstring''' import argparse import json import os from tensorflow.core.protobuf.saved_model_pb2 import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __lowerCamelCase : str = '''.''' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) __lowerCamelCase : Tuple = [ '''Assert''', '''AssignVariableOp''', '''EmptyTensorList''', '''MergeV2Checkpoints''', '''ReadVariableOp''', '''ResourceGather''', '''RestoreV2''', '''SaveV2''', '''ShardedFilename''', '''StatefulPartitionedCall''', '''StaticRegexFullMatch''', '''VarHandleOp''', ] def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]: """simple docstring""" snake_case_ : Tuple = SavedModel() snake_case_ : Dict = [] with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f: snake_case_ : Dict = json.load(__magic_name__ )["opsets"] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(__magic_name__ )] ) with open(__magic_name__ ,"rb" ) as f: saved_model.ParseFromString(f.read() ) snake_case_ : Tuple = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want snake_case_ : str = sorted(__magic_name__ ) snake_case_ : Optional[int] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__magic_name__ ) if strict and len(__magic_name__ ) > 0: raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops) ) elif len(__magic_name__ ) > 0: print(F'''Found the following incompatible ops for the opset {opset}:''' ) print(*__magic_name__ ,sep="\n" ) else: print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": __lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''') parser.add_argument( '''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.''' ) parser.add_argument( '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.''' ) parser.add_argument( '''--strict''', action='''store_true''', help='''Whether to make the checking strict (raise errors) or not (raise warnings)''' ) __lowerCamelCase : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
656
1
'''simple docstring''' from pathlib import Path import fire from tqdm import tqdm def __UpperCAmelCase ( __magic_name__="ro" ,__magic_name__="en" ,__magic_name__="wmt16" ,__magic_name__=None )-> None: """simple docstring""" try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError("run pip install datasets" ) snake_case_ : Union[str, Any] = F'''{src_lang}-{tgt_lang}''' print(F'''Converting {dataset}-{pair}''' ) snake_case_ : str = datasets.load_dataset(__magic_name__ ,__magic_name__ ) if save_dir is None: snake_case_ : List[Any] = F'''{dataset}-{pair}''' snake_case_ : Optional[int] = Path(__magic_name__ ) save_dir.mkdir(exist_ok=__magic_name__ ) for split in ds.keys(): print(F'''Splitting {split} with {ds[split].num_rows} records''' ) # to save to val.source, val.target like summary datasets snake_case_ : int = "val" if split == "validation" else split snake_case_ : int = save_dir.joinpath(F'''{fn}.source''' ) snake_case_ : Optional[Any] = save_dir.joinpath(F'''{fn}.target''' ) snake_case_ : Optional[int] = src_path.open("w+" ) snake_case_ : List[Any] = tgt_path.open("w+" ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): snake_case_ : Optional[int] = x["translation"] src_fp.write(ex[src_lang] + "\n" ) tgt_fp.write(ex[tgt_lang] + "\n" ) print(F'''Saved {dataset} dataset to {save_dir}''' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
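# A minimal usage sketch, assuming the file above is saved as download_wmt.py (the name is
# an assumption); fire.Fire exposes the keyword arguments as CLI flags:
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# which writes {split}.source / {split}.target line-aligned files ("validation" is shortened to "val").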
656
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) __lowerCamelCase : List[str] = ['''names''', '''prefix'''] __lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] __lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines'''] __lowerCamelCase : Optional[Any] = ['''date_format'''] @dataclass class A_ (datasets.BuilderConfig ): """simple docstring""" a__ = "," a__ = None a__ = "infer" a__ = None a__ = None a__ = None a__ = None a__ = None a__ = True a__ = None a__ = None a__ = None a__ = None a__ = False a__ = None a__ = None a__ = None a__ = True a__ = True a__ = False a__ = True a__ = None a__ = "." a__ = None a__ = '"' a__ = 0 a__ = None a__ = None a__ = None a__ = None a__ = True a__ = True a__ = 0 a__ = True a__ = False a__ = None a__ = 10000 a__ = None a__ = "strict" a__ = "error" a__ = None def _A ( self :List[str] ) -> Any: '''simple docstring''' if self.delimiter is not None: snake_case_ : Tuple = self.delimiter if self.column_names is not None: snake_case_ : List[Any] = self.column_names @property def _A ( self :Optional[Any] ) -> int: '''simple docstring''' snake_case_ : Optional[int] = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del 
pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A_ (datasets.ArrowBasedBuilder ): """simple docstring""" a__ = CsvConfig def _A ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]: '''simple docstring''' if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): snake_case_ : int = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : List[str] = [files] snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] snake_case_ : str = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : str = [files] snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table: '''simple docstring''' if self.config.features is not None: snake_case_ : int = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str snake_case_ : str = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' ) raise
656
1
'''simple docstring''' import heapq as hq import math from collections.abc import Iterator class A_ : """simple docstring""" def __init__( self :Optional[int] , lowerCAmelCase__ :List[Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[Any] = str(id_ ) snake_case_ : Union[str, Any] = None snake_case_ : Union[str, Any] = None snake_case_ : Optional[Any] = [] snake_case_ : List[str] = {} # {vertex:distance} def __lt__( self :List[Any] , lowerCAmelCase__ :Tuple ) -> List[str]: '''simple docstring''' return self.key < other.key def __repr__( self :Optional[Any] ) -> Dict: '''simple docstring''' return self.id def _A ( self :List[Any] , lowerCAmelCase__ :str ) -> Dict: '''simple docstring''' self.neighbors.append(lowerCAmelCase__ ) def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Any = weight def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Any: """simple docstring""" graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] ,__magic_name__ ) graph[b - 1].add_edge(graph[a - 1] ,__magic_name__ ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list: """simple docstring""" snake_case_ : Union[str, Any] = [] for u in graph: snake_case_ : Optional[int] = math.inf snake_case_ : Union[str, Any] = None snake_case_ : List[Any] = 0 snake_case_ : Optional[Any] = graph[:] while q: snake_case_ : List[Any] = min(__magic_name__ ) q.remove(__magic_name__ ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): snake_case_ : Any = u snake_case_ : int = u.edges[v.id] for i in range(1 ,len(__magic_name__ ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Iterator[tuple]: """simple docstring""" for u in graph: snake_case_ : Any = math.inf snake_case_ : str = None snake_case_ : List[str] = 0 snake_case_ : Dict = list(__magic_name__ ) hq.heapify(__magic_name__ ) while h: snake_case_ : Union[str, Any] = hq.heappop(__magic_name__ ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): snake_case_ : Tuple = u snake_case_ : Any = u.edges[v.id] hq.heapify(__magic_name__ ) for i in range(1 ,len(__magic_name__ ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def __UpperCAmelCase ( )-> None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
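# A commented usage sketch for the Prim helpers above. The dump renames the vertex class to
# A_ and every function to __UpperCAmelCase, so `Vertex`, `connect` and `prim` below are
# assumptions about the original names; connect(graph, a, b, weight) adds an undirected
# weighted edge between 1-based vertices a and b:
#   g = [Vertex(i) for i in range(3)]
#   connect(g, 1, 2, 1)
#   connect(g, 2, 3, 2)
#   connect(g, 1, 3, 3)
#   prim(g, g[0])   # -> [(2, 1), (3, 2)]: each non-root vertex paired with its MST parent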
656
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MgpstrTokenizer a__ = False a__ = {} a__ = False def _A ( self :List[str] ) -> List[str]: '''simple docstring''' super().setUp() # fmt: off snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str: '''simple docstring''' snake_case_ : Dict = "tester" snake_case_ : Tuple = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def _A ( self :Dict ) -> str: '''simple docstring''' pass def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : Tuple = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 1 ) snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) self.assertTrue(special_token not in decoded ) def _A ( self :int ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertNotEqual(len(lowerCAmelCase__ ) , 0 ) snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def _A ( self :Union[str, Any] ) -> Any: '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def _A ( self :int ) -> Dict: '''simple docstring''' pass
656
1
'''simple docstring''' from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , a_ , ) class A_ (a_ ): """simple docstring""" a__ = RobertaConfig a__ = '''roberta''' def __init__( self :List[Any] , lowerCAmelCase__ :Union[str, Any] ) -> List[Any]: '''simple docstring''' super().__init__(lowerCAmelCase__ ) snake_case_ : int = RobertaEmbeddings(lowerCAmelCase__ ) self.init_weights() @add_start_docstrings( '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. ''' , a_ , ) class A_ (a_ ): """simple docstring""" a__ = RobertaConfig a__ = '''roberta''' def __init__( self :Union[str, Any] , lowerCAmelCase__ :int ) -> Dict: '''simple docstring''' super().__init__(lowerCAmelCase__ ) snake_case_ : Tuple = config.num_labels snake_case_ : List[str] = config.num_hidden_layers snake_case_ : Optional[int] = DeeRobertaModel(lowerCAmelCase__ ) snake_case_ : Optional[int] = nn.Dropout(config.hidden_dropout_prob ) snake_case_ : Optional[int] = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(lowerCAmelCase__ ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :List[Any]=-1 , lowerCAmelCase__ :Union[str, Any]=False , ) -> Optional[Any]: '''simple docstring''' snake_case_ : Dict = self.num_layers try: snake_case_ : Optional[Any] = self.roberta( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , ) snake_case_ : str = outputs[1] snake_case_ : Any = self.dropout(lowerCAmelCase__ ) snake_case_ : str = self.classifier(lowerCAmelCase__ ) snake_case_ : Optional[int] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: snake_case_ : str = e.message snake_case_ : Any = e.exit_layer snake_case_ : List[Any] = outputs[0] if not self.training: snake_case_ : Tuple = entropy(lowerCAmelCase__ ) snake_case_ : Any = [] snake_case_ : Optional[Any] = [] if labels is not None: if self.num_labels == 1: # We are doing regression snake_case_ : Optional[Any] = MSELoss() snake_case_ : str = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: snake_case_ : Tuple = CrossEntropyLoss() snake_case_ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits snake_case_ : Union[str, Any] = [] for highway_exit in outputs[-1]: snake_case_ : Optional[Any] = highway_exit[0] if not self.training: highway_logits_all.append(lowerCAmelCase__ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression snake_case_ : Tuple = MSELoss() snake_case_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: 
snake_case_ : Dict = CrossEntropyLoss() snake_case_ : Dict = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(lowerCAmelCase__ ) if train_highway: snake_case_ : Any = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: snake_case_ : Union[str, Any] = (loss,) + outputs if not self.training: snake_case_ : Tuple = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: snake_case_ : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
656
'''simple docstring''' from __future__ import annotations import math import numpy as np from numpy.linalg import norm def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float: """simple docstring""" return math.sqrt(sum(pow(a - b ,2 ) for a, b in zip(__magic_name__ ,__magic_name__ ) ) ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> list[list[list[float] | float]]: """simple docstring""" if dataset.ndim != value_array.ndim: snake_case_ : int = ( "Wrong input data's dimensions... " F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}''' ) raise ValueError(__magic_name__ ) try: if dataset.shape[1] != value_array.shape[1]: snake_case_ : Dict = ( "Wrong input data's shape... " F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}''' ) raise ValueError(__magic_name__ ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("Wrong shape" ) if dataset.dtype != value_array.dtype: snake_case_ : Dict = ( "Input data have different datatype... " F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}''' ) raise TypeError(__magic_name__ ) snake_case_ : Optional[int] = [] for value in value_array: snake_case_ : List[str] = euclidean(__magic_name__ ,dataset[0] ) snake_case_ : int = dataset[0].tolist() for dataset_value in dataset[1:]: snake_case_ : Optional[Any] = euclidean(__magic_name__ ,__magic_name__ ) if dist > temp_dist: snake_case_ : Tuple = temp_dist snake_case_ : Optional[int] = dataset_value.tolist() answer.append([vector, dist] ) return answer def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> float: """simple docstring""" return np.dot(__magic_name__ ,__magic_name__ ) / (norm(__magic_name__ ) * norm(__magic_name__ )) if __name__ == "__main__": import doctest doctest.testmod()
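# A self-contained numeric check of the two formulas used above (numpy only); it does not
# call the renamed helpers, it simply restates the math they implement:
import numpy as np

a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
assert np.isclose(np.sqrt(np.sum((a - b) ** 2)), np.sqrt(2))                    # Euclidean distance
assert np.isclose(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)), 0.0)  # cosine similarity of orthogonal vectors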
656
1
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline __lowerCamelCase : Union[str, Any] = datasets.utils.logging.get_logger(__name__) @dataclass class A_ (datasets.BuilderConfig ): """simple docstring""" a__ = None a__ = "utf-8" a__ = None a__ = None a__ = True # deprecated a__ = None # deprecated a__ = 10 << 20 # 10MB a__ = None class A_ (datasets.ArrowBasedBuilder ): """simple docstring""" a__ = JsonConfig def _A ( self :Optional[int] ) -> str: '''simple docstring''' if self.config.block_size is not None: logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" ) snake_case_ : Any = self.config.block_size if self.config.use_threads is not True: logger.warning( "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." ) if self.config.newlines_in_values is not None: raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" ) return datasets.DatasetInfo(features=self.config.features ) def _A ( self :List[Any] , lowerCAmelCase__ :List[Any] ) -> int: '''simple docstring''' if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) snake_case_ : Dict = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): snake_case_ : List[str] = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : Any = [files] snake_case_ : Optional[int] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] snake_case_ : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : Union[str, Any] = [files] snake_case_ : Dict = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table: '''simple docstring''' if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): snake_case_ : Optional[int] = self.config.features.arrow_schema.field(lowerCAmelCase__ ).type snake_case_ : Dict = pa_table.append_column(lowerCAmelCase__ , pa.array([None] * len(lowerCAmelCase__ ) , type=lowerCAmelCase__ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example snake_case_ : List[str] = table_cast(lowerCAmelCase__ , self.config.features.arrow_schema ) return pa_table def _A ( self :str , lowerCAmelCase__ :Optional[int] ) -> int: '''simple docstring''' for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(lowerCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case_ : Optional[Any] = json.load(lowerCAmelCase__ ) # We keep only the field we are interested in snake_case_ : Dict = dataset[self.config.field] # We accept two format: a list of 
dicts or a dict of lists if isinstance(lowerCAmelCase__ , (list, tuple) ): snake_case_ : Optional[Any] = set().union(*[row.keys() for row in dataset] ) snake_case_ : Any = {col: [row.get(lowerCAmelCase__ ) for row in dataset] for col in keys} else: snake_case_ : str = dataset snake_case_ : List[str] = pa.Table.from_pydict(lowerCAmelCase__ ) yield file_idx, self._cast_table(lowerCAmelCase__ ) # If the file has one json object per line else: with open(lowerCAmelCase__ , "rb" ) as f: snake_case_ : Union[str, Any] = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small snake_case_ : List[str] = max(self.config.chunksize // 32 , 16 << 10 ) snake_case_ : Any = ( self.config.encoding_errors if self.config.encoding_errors is not None else "strict" ) while True: snake_case_ : Dict = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(lowerCAmelCase__ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": snake_case_ : Union[str, Any] = batch.decode(self.config.encoding , errors=lowerCAmelCase__ ).encode("utf-8" ) try: while True: try: snake_case_ : Dict = paj.read_json( io.BytesIO(lowerCAmelCase__ ) , read_options=paj.ReadOptions(block_size=lowerCAmelCase__ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(lowerCAmelCase__ , pa.ArrowInvalid ) and "straddling" not in str(lowerCAmelCase__ ) or block_size > len(lowerCAmelCase__ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F'''Batch of {len(lowerCAmelCase__ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( lowerCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: snake_case_ : int = json.load(lowerCAmelCase__ ) except json.JSONDecodeError: logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # list is the only sequence type supported in JSON try: snake_case_ : Union[str, Any] = set().union(*[row.keys() for row in dataset] ) snake_case_ : str = {col: [row.get(lowerCAmelCase__ ) for row in dataset] for col in keys} snake_case_ : Optional[int] = pa.Table.from_pydict(lowerCAmelCase__ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' ) raise ValueError(F'''Not able to read records in the JSON file at {file}.''' ) from None yield file_idx, self._cast_table(lowerCAmelCase__ ) break else: logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' ) raise ValueError( F'''Not able to read records in the JSON file at {file}. ''' F'''You should probably indicate the field of the JSON file containing your records. ''' F'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ''' F'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. 
''' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) batch_idx += 1
656
'''simple docstring''' import fire from utils import calculate_rouge, save_json def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=None ,**__magic_name__ )-> Optional[Any]: """simple docstring""" snake_case_ : int = [x.strip() for x in open(__magic_name__ ).readlines()] snake_case_ : Optional[int] = [x.strip() for x in open(__magic_name__ ).readlines()][: len(__magic_name__ )] snake_case_ : List[Any] = calculate_rouge(__magic_name__ ,__magic_name__ ,**__magic_name__ ) if save_path is not None: save_json(__magic_name__ ,__magic_name__ ,indent=__magic_name__ ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
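# A hedged CLI sketch, assuming the file above is saved as calc_rouge.py (name assumed);
# fire maps the two positional file arguments and the optional --save_path flag:
#   python calc_rouge.py preds.txt refs.txt --save_path rouge.json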
656
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : List[str] = logging.get_logger(__name__) __lowerCamelCase : Dict = { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json''' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class A_ (a_ ): """simple docstring""" a__ = '''speech_to_text''' a__ = ['''past_key_values'''] a__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=10_000 , lowerCAmelCase__ :Optional[Any]=12 , lowerCAmelCase__ :Tuple=2_048 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Union[str, Any]=2_048 , lowerCAmelCase__ :Optional[int]=4 , lowerCAmelCase__ :int=0.0 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :str="relu" , lowerCAmelCase__ :Dict=256 , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :List[str]=0.0 , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[Any]=1 , lowerCAmelCase__ :Dict=0 , lowerCAmelCase__ :Union[str, Any]=2 , lowerCAmelCase__ :Optional[int]=6_000 , lowerCAmelCase__ :int=1_024 , lowerCAmelCase__ :Union[str, Any]=2 , lowerCAmelCase__ :List[Any]=(5, 5) , lowerCAmelCase__ :Optional[Any]=1_024 , lowerCAmelCase__ :Union[str, Any]=80 , lowerCAmelCase__ :int=1 , **lowerCAmelCase__ :Tuple , ) -> Dict: '''simple docstring''' snake_case_ : int = vocab_size snake_case_ : str = d_model snake_case_ : Optional[Any] = encoder_ffn_dim snake_case_ : Union[str, Any] = encoder_layers snake_case_ : str = encoder_attention_heads snake_case_ : Optional[int] = decoder_ffn_dim snake_case_ : Optional[int] = decoder_layers snake_case_ : Optional[Any] = decoder_attention_heads snake_case_ : int = dropout snake_case_ : List[Any] = attention_dropout snake_case_ : Optional[int] = activation_dropout snake_case_ : List[Any] = activation_function snake_case_ : Optional[int] = init_std snake_case_ : str = encoder_layerdrop snake_case_ : Optional[int] = decoder_layerdrop snake_case_ : List[Any] = use_cache snake_case_ : List[str] = encoder_layers snake_case_ : str = scale_embedding # scale factor will be sqrt(d_model) if True snake_case_ : Dict = max_source_positions snake_case_ : Optional[int] = max_target_positions snake_case_ : Dict = num_conv_layers snake_case_ : Tuple = list(lowerCAmelCase__ ) snake_case_ : str = conv_channels snake_case_ : int = input_feat_per_channel snake_case_ : List[str] = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` " F'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' F'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
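# A minimal sketch against the released class this config corresponds to upstream
# (assumption: it is transformers' Speech2TextConfig). The constructor check at the end of
# __init__ above ties num_conv_layers to len(conv_kernel_sizes):
from transformers import Speech2TextConfig

cfg = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))  # consistent: 2 == len((5, 5))
assert len(cfg.conv_kernel_sizes) == cfg.num_conv_layers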
656
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __lowerCamelCase : Optional[Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', 
f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', 
'''query_position_embeddings.weight'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''), ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''), ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''), ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''), ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''), ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''), ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''), ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''), ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''), ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''), ] ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int: """simple docstring""" snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ ) snake_case_ : Any = val def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]: """simple docstring""" snake_case_ : Any = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" ) snake_case_ : int = value else: snake_case_ : int = value return new_state_dict def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]: """simple docstring""" snake_case_ : str = "" if is_panoptic: snake_case_ : Dict = "conditional_detr." 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case_ : Tuple = in_proj_weight[:256, :] snake_case_ : List[Any] = in_proj_bias[:256] snake_case_ : Optional[Any] = in_proj_weight[256:512, :] snake_case_ : Optional[int] = in_proj_bias[256:512] snake_case_ : Optional[int] = in_proj_weight[-256:, :] snake_case_ : str = in_proj_bias[-256:] def __UpperCAmelCase ( )-> Optional[Any]: """simple docstring""" snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg" snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]: """simple docstring""" snake_case_ : Optional[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: snake_case_ : Optional[Any] = "resnet101" if "dc5" in model_name: snake_case_ : List[str] = True snake_case_ : Tuple = "panoptic" in model_name if is_panoptic: snake_case_ : List[Any] = 250 else: snake_case_ : Optional[Any] = 91 snake_case_ : Optional[int] = "huggingface/label-files" snake_case_ : Dict = "coco-detection-id2label.json" snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) ) snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()} snake_case_ : int = idalabel snake_case_ : Dict = {v: k for k, v in idalabel.items()} # load image processor snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection" snake_case_ : str = ConditionalDetrImageProcessor(format=__magic_name__ ) # prepare image snake_case_ : str = prepare_img() snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" ) snake_case_ : Union[str, Any] = encoding["pixel_values"] logger.info(F'''Converting model {model_name}...''' ) # load original model from torch hub snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,__magic_name__ ,pretrained=__magic_name__ ).eval() snake_case_ : Any = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: snake_case_ : Any = "conditional_detr." + src rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ ) snake_case_ : Tuple = rename_backbone_keys(__magic_name__ ) # query, key and value matrices need special treatment read_in_q_k_v(__magic_name__ ,is_panoptic=__magic_name__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case_ : int = "conditional_detr.model." if is_panoptic else "model." 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): snake_case_ : Any = state_dict.pop(__magic_name__ ) snake_case_ : Optional[int] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case_ : Tuple = state_dict.pop(__magic_name__ ) snake_case_ : Any = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: snake_case_ : Union[str, Any] = state_dict.pop(__magic_name__ ) snake_case_ : List[Any] = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): snake_case_ : Any = state_dict.pop(__magic_name__ ) snake_case_ : List[Any] = val # finally, create HuggingFace model and load state dict snake_case_ : Optional[int] = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ ) model.load_state_dict(__magic_name__ ) model.eval() model.push_to_hub(repo_id=__magic_name__ ,organization="DepuMeng" ,commit_message="Add model" ) # verify our conversion snake_case_ : Dict = conditional_detr(__magic_name__ ) snake_case_ : Union[str, Any] = model(__magic_name__ ) assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 ) assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 ) # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ ) model.save_pretrained(__magic_name__ ) image_processor.save_pretrained(__magic_name__ ) if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( '''--model_name''', default='''conditional_detr_resnet50''', type=str, help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) __lowerCamelCase : int = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
656
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig __lowerCamelCase : Union[str, Any] = { '''google/tapas-base-finetuned-sqa''': ( '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wtq''': ( '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wikisql-supervised''': ( '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json''' ), '''google/tapas-base-finetuned-tabfact''': ( '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json''' ), } class A_ (a_ ): """simple docstring""" a__ = '''tapas''' def __init__( self :Tuple , lowerCAmelCase__ :Union[str, Any]=30_522 , lowerCAmelCase__ :Tuple=768 , lowerCAmelCase__ :Optional[Any]=12 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :Union[str, Any]="gelu" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[Any]=[3, 256, 256, 2, 256, 256, 10] , lowerCAmelCase__ :Tuple=0.0_2 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Tuple=1_0.0 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :str=1.0 , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :List[Any]=1.0 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :Any=1.0 , lowerCAmelCase__ :List[Any]=1.0 , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :int=False , lowerCAmelCase__ :Optional[Any]="ratio" , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :int=64 , lowerCAmelCase__ :Union[str, Any]=32 , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :int=False , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Optional[int]=None , **lowerCAmelCase__ :int , ) -> Union[str, Any]: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) snake_case_ : Tuple = vocab_size snake_case_ : List[Any] = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : Optional[int] = hidden_act snake_case_ : Dict = intermediate_size snake_case_ : List[Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : Optional[int] = max_position_embeddings snake_case_ : Union[str, Any] = type_vocab_sizes snake_case_ : Optional[int] = initializer_range snake_case_ : int = layer_norm_eps # Fine-tuning task hyperparameters snake_case_ : Dict = positive_label_weight snake_case_ : List[str] = num_aggregation_labels snake_case_ : Union[str, Any] = aggregation_loss_weight snake_case_ : Tuple = use_answer_as_supervision snake_case_ : Optional[Any] = answer_loss_importance snake_case_ : Dict = use_normalized_answer_loss snake_case_ : Any = huber_loss_delta snake_case_ : Optional[int] = temperature snake_case_ : int = aggregation_temperature snake_case_ : Union[str, Any] = use_gumbel_for_cells snake_case_ : List[Any] = use_gumbel_for_aggregation snake_case_ : Optional[Any] = average_approximation_function snake_case_ : Optional[int] = cell_selection_preference snake_case_ : Union[str, Any] = answer_loss_cutoff snake_case_ : List[str] = 
max_num_rows snake_case_ : List[Any] = max_num_columns snake_case_ : str = average_logits_per_cell snake_case_ : List[str] = select_one_column snake_case_ : List[str] = allow_empty_column_selection snake_case_ : Dict = init_cell_selection_weights_to_zero snake_case_ : int = reset_position_index_per_cell snake_case_ : Optional[Any] = disable_per_token_loss # Aggregation hyperparameters snake_case_ : List[str] = aggregation_labels snake_case_ : List[Any] = no_aggregation_label_index if isinstance(self.aggregation_labels , lowerCAmelCase__ ): snake_case_ : Tuple = {int(lowerCAmelCase__ ): v for k, v in aggregation_labels.items()}
656
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :Any ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _A ( self :List[Any] ) -> List[str]: '''simple docstring''' snake_case_ : Any = 1 snake_case_ : Dict = 3 snake_case_ : Union[str, Any] = (32, 32) snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) return image @property def _A ( self :Optional[int] ) -> Any: '''simple docstring''' torch.manual_seed(0 ) snake_case_ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def _A ( self :Dict ) -> Any: '''simple docstring''' torch.manual_seed(0 ) snake_case_ : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _A ( self :Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) snake_case_ : str = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , ) return RobertaSeriesModelWithTransformation(lowerCAmelCase__ ) @property def _A ( self :Any ) -> str: '''simple docstring''' def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ): class A_ : """simple docstring""" def __init__( self :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : str = torch.ones([0] ) def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple: '''simple docstring''' self.pixel_values.to(lowerCAmelCase__ ) return self return Out() return extract def _A ( self :int ) -> Dict: '''simple docstring''' snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator snake_case_ : str = self.dummy_cond_unet snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) snake_case_ : Dict = self.dummy_vae snake_case_ : Dict = self.dummy_text_encoder snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) snake_case_ : str = 77 snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ ) snake_case_ : Tuple = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline( unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , ) snake_case_ : 
Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ ) snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) snake_case_ : Dict = "A painting of a squirrel eating a burger" snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) snake_case_ : Dict = alt_pipe( [prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ) snake_case_ : Any = output.images snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) snake_case_ : Optional[Any] = alt_pipe( [prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0] snake_case_ : Tuple = image[0, -3:, -3:, -1] snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _A ( self :int ) -> List[str]: '''simple docstring''' snake_case_ : Union[str, Any] = self.dummy_cond_unet snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) snake_case_ : int = self.dummy_vae snake_case_ : List[Any] = self.dummy_text_encoder snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) snake_case_ : int = 77 snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ ) # put models in fp16 snake_case_ : Optional[Any] = unet.half() snake_case_ : Tuple = vae.half() snake_case_ : List[str] = bert.half() # make sure here that pndm scheduler skips prk snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline( unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , ) snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ ) snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) snake_case_ : List[Any] = "A painting of a squirrel eating a burger" snake_case_ : str = torch.manual_seed(0 ) snake_case_ : Any = alt_pipe( [prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _A ( self :Optional[int] ) -> Any: '''simple docstring''' snake_case_ : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) # resize to resolution that is divisible by 8 but not 16 or 32 snake_case_ : str = init_image.resize((760, 504) ) snake_case_ : Optional[Any] = "BAAI/AltDiffusion" snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained( lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() snake_case_ : Tuple = "A fantasy landscape, trending 
on artstation" snake_case_ : int = torch.manual_seed(0 ) snake_case_ : List[str] = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , ) snake_case_ : str = output.images[0] snake_case_ : List[Any] = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _A ( self :str ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) snake_case_ : List[Any] = init_image.resize((768, 512) ) snake_case_ : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" ) snake_case_ : Any = "BAAI/AltDiffusion" snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained( lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() snake_case_ : Tuple = "A fantasy landscape, trending on artstation" snake_case_ : Tuple = torch.manual_seed(0 ) snake_case_ : List[Any] = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , ) snake_case_ : Optional[int] = output.images[0] assert image.shape == (512, 768, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
656
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCamelCase : Optional[Any] = logging.get_logger(__name__) __lowerCamelCase : str = { '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''', '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''', '''kssteven/ibert-roberta-large-mnli''': ( '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json''' ), } class A_ (a_ ): """simple docstring""" a__ = '''ibert''' def __init__( self :str , lowerCAmelCase__ :Dict=30_522 , lowerCAmelCase__ :str=768 , lowerCAmelCase__ :List[str]=12 , lowerCAmelCase__ :Optional[Any]=12 , lowerCAmelCase__ :Any=3_072 , lowerCAmelCase__ :int="gelu" , lowerCAmelCase__ :Any=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :int=512 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :Dict=1 , lowerCAmelCase__ :List[str]=0 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[Any]="absolute" , lowerCAmelCase__ :Dict=False , lowerCAmelCase__ :Dict="none" , **lowerCAmelCase__ :Any , ) -> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : List[str] = vocab_size snake_case_ : Optional[int] = hidden_size snake_case_ : Optional[Any] = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : List[Any] = hidden_act snake_case_ : Optional[int] = intermediate_size snake_case_ : Optional[int] = hidden_dropout_prob snake_case_ : Union[str, Any] = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : Dict = type_vocab_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : Dict = layer_norm_eps snake_case_ : List[str] = position_embedding_type snake_case_ : Dict = quant_mode snake_case_ : List[Any] = force_dequant class A_ (a_ ): """simple docstring""" @property def _A ( self :Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": snake_case_ : Tuple = {0: "batch", 1: "choice", 2: "sequence"} else: snake_case_ : Any = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
656
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class A_ (unittest.TestCase ): """simple docstring""" a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: a__ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = ZeroShotClassificationPipeline( model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["politics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]: '''simple docstring''' snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) # No kwarg snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" ) self.assertEqual( lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] ) self.assertEqual( lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) snake_case_ : str = classifier( "Who are you voting for in 2020?"
, candidate_labels="politics" , hypothesis_template="This text is about {}" ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) # https://github.com/huggingface/transformers/issues/13846 snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] ) self.assertEqual( lowerCAmelCase__ , [ {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} for i in range(1 ) ] , ) snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] ) self.assertEqual( lowerCAmelCase__ , [ {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} for i in range(2 ) ] , ) with self.assertRaises(lowerCAmelCase__ ): classifier("" , candidate_labels="politics" ) with self.assertRaises(lowerCAmelCase__ ): classifier(lowerCAmelCase__ , candidate_labels="politics" ) with self.assertRaises(lowerCAmelCase__ ): classifier("Who are you voting for in 2020?" , candidate_labels="" ) with self.assertRaises(lowerCAmelCase__ ): classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ ) with self.assertRaises(lowerCAmelCase__ ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , ) with self.assertRaises(lowerCAmelCase__ ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , ) self.run_entailment_id(lowerCAmelCase__ ) def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]: '''simple docstring''' snake_case_ : int = zero_shot_classifier.model.config snake_case_ : Optional[int] = config.labelaid snake_case_ : Tuple = zero_shot_classifier.entailment_id snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) snake_case_ : List[str] = original_labelaid self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id ) @require_torch def _A ( self :Tuple ) -> Any: '''simple docstring''' snake_case_ : List[Any] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] ) @require_torch def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) snake_case_ : int = zero_shot_classifier( "Who are you voting for in 2020?" 
, candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' snake_case_ : List[str] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , ) snake_case_ : Optional[int] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _A ( self :Union[str, Any] ) -> int: '''simple docstring''' snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" ) snake_case_ : str = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) snake_case_ : Optional[int] = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _A ( self :List[str] ) -> str: '''simple docstring''' snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" ) snake_case_ : Optional[Any] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) snake_case_ : Tuple = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. 
We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
656
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : List[Any] = { '''configuration_blenderbot''': [ '''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotConfig''', '''BlenderbotOnnxConfig''', ], '''tokenization_blenderbot''': ['''BlenderbotTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = ['''BlenderbotTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = [ '''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotForCausalLM''', '''BlenderbotForConditionalGeneration''', '''BlenderbotModel''', '''BlenderbotPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = [ '''TFBlenderbotForConditionalGeneration''', '''TFBlenderbotModel''', '''TFBlenderbotPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ '''FlaxBlenderbotForConditionalGeneration''', '''FlaxBlenderbotModel''', '''FlaxBlenderbotPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __lowerCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
656
'''simple docstring''' import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''): raise Exception('''requires fairseq >= 1.0.0a''') logging.set_verbosity_info() __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip''' def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]: """simple docstring""" snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ ) roberta.eval() # disable dropout snake_case_ : Dict = roberta.model.encoder.sentence_encoder snake_case_ : List[str] = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,) if classification_head: snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our RoBERTa config:" ,__magic_name__ ) snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ ) model.eval() # Now let's copy all the weights. # Embeddings snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight snake_case_ : int = roberta_sent_encoder.embed_positions.weight snake_case_ : Union[str, Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight snake_case_ : str = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer snake_case_ : BertLayer = model.roberta.encoder.layer[i] snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] snake_case_ : RobertaAttention = layer.attention snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias # self attention snake_case_ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight snake_case_ : Any = roberta_layer.self_attn.q_proj.bias snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight snake_case_ : Any = roberta_layer.self_attn.v_proj.bias # self-attention output snake_case_ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm snake_case_ : int = roberta_layer.final_layer_norm.weight snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias # intermediate snake_case_ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape snake_case_ : List[str] = roberta_layer.fca.weight snake_case_ : List[Any] = roberta_layer.fca.bias # output snake_case_ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape snake_case_ : Any = roberta_layer.fca.weight snake_case_ : Any = roberta_layer.fca.bias # end of layer if classification_head: snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias else: # LM Head snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight snake_case_ : int = roberta.model.encoder.lm_head.dense.bias snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias snake_case_ : int = roberta.model.encoder.lm_head.weight snake_case_ : List[str] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1 snake_case_ : Union[str, Any] = model(__magic_name__ )[0] if classification_head: snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) ) else: snake_case_ : List[str] = roberta.model(__magic_name__ )[0] print(our_output.shape ,their_output.shape ) snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item() print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 ) print("Do both models output the same tensors?" ,"🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__magic_name__ ) if __name__ == "__main__": __lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) __lowerCamelCase : Tuple = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
656
1
'''simple docstring''' import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __lowerCamelCase : Dict = '''\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } ''' __lowerCamelCase : Dict = '''\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metric is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve ''' __lowerCamelCase : Optional[Any] = ''' Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each prediction should be a string with tokens separated by spaces. references: list of references for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: "c" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text.
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric(\'mauve\') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): """simple docstring""" def _A ( self :List[Any] ) -> int: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :List[str]=None , lowerCAmelCase__ :int="auto" , lowerCAmelCase__ :Optional[int]=-1 , lowerCAmelCase__ :Union[str, Any]=0.9 , lowerCAmelCase__ :Any=5 , lowerCAmelCase__ :Optional[int]=500 , lowerCAmelCase__ :Optional[Any]="gpt2-large" , lowerCAmelCase__ :str=-1 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Union[str, Any]=25 , lowerCAmelCase__ :Optional[int]=5 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Dict=25 , ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[Any] = compute_mauve( p_text=lowerCAmelCase__ , q_text=lowerCAmelCase__ , p_features=lowerCAmelCase__ , q_features=lowerCAmelCase__ , p_tokens=lowerCAmelCase__ , q_tokens=lowerCAmelCase__ , num_buckets=lowerCAmelCase__ , pca_max_data=lowerCAmelCase__ , kmeans_explained_var=lowerCAmelCase__ , kmeans_num_redo=lowerCAmelCase__ , kmeans_max_iter=lowerCAmelCase__ , featurize_model_name=lowerCAmelCase__ , device_id=lowerCAmelCase__ , max_text_length=lowerCAmelCase__ , divergence_curve_discretization_size=lowerCAmelCase__ , mauve_scaling_factor=lowerCAmelCase__ , verbose=lowerCAmelCase__ , seed=lowerCAmelCase__ , ) return out
656
'''simple docstring''' import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]: """simple docstring""" snake_case_ : str = False snake_case_ : int = False if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ): snake_case_ : Any = True elif "IPython" in sys.modules: snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() ) try: snake_case_ : Any = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' ) if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " "your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if num_processes is None: snake_case_ : Tuple = 8 snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" ) print(F'''Launching a training on {num_processes} TPU cores.''' ) xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on one CPU." ) function(*__magic_name__ ) else: if num_processes is None: raise ValueError( "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " "inside your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if torch.cuda.is_initialized(): raise ValueError( "To launch a multi-GPU training from your notebook, you need to avoid running any instruction " "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA " "function." ) # torch.distributed will expect a few environment variables to be here. We set the ones common to each # process here (the other ones will be set by the launcher). with patch_environment( world_size=__magic_name__ ,master_addr="127.0.0.1" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ): snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" ) print(F'''Launching training on {num_processes} GPUs.''' ) try: start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " "This likely stems from an outside import causing issues once the `notebook_launcher()` is called.
" "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic." ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): snake_case_ : Any = "1" print("Launching training on MPS." ) elif torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on CPU." ) function(*__magic_name__ ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict: """simple docstring""" from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__magic_name__ ,master_addr="127.0.01" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,): snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ ) start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
656
1
'''simple docstring''' import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A_ : """simple docstring""" def __init__( self :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str]=13 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple=2 , lowerCAmelCase__ :Tuple=3 , lowerCAmelCase__ :Dict=16 , lowerCAmelCase__ :Optional[int]=[32, 64, 128] , lowerCAmelCase__ :int=[1, 2, 1] , lowerCAmelCase__ :Union[str, Any]=[2, 2, 4] , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :Tuple=2.0 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :List[str]=0.0 , lowerCAmelCase__ :List[Any]=0.0 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Any="gelu" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[int]=0.0_2 , lowerCAmelCase__ :str=1E-5 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Dict=8 , lowerCAmelCase__ :Optional[int]=["stage1", "stage2"] , lowerCAmelCase__ :Union[str, Any]=[1, 2] , ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[Any] = parent snake_case_ : Dict = batch_size snake_case_ : str = image_size snake_case_ : Dict = patch_size snake_case_ : Union[str, Any] = num_channels snake_case_ : Optional[int] = embed_dim snake_case_ : List[Any] = hidden_sizes snake_case_ : Optional[int] = depths snake_case_ : Tuple = num_heads snake_case_ : str = window_size snake_case_ : List[Any] = mlp_ratio snake_case_ : List[Any] = qkv_bias snake_case_ : List[str] = hidden_dropout_prob snake_case_ : Optional[Any] = attention_probs_dropout_prob snake_case_ : Tuple = drop_path_rate snake_case_ : Union[str, Any] = hidden_act snake_case_ : Optional[int] = use_absolute_embeddings snake_case_ : str = patch_norm snake_case_ : Optional[int] = layer_norm_eps snake_case_ : Union[str, Any] = initializer_range snake_case_ : str = is_training snake_case_ : Any = scope snake_case_ : Optional[int] = use_labels snake_case_ : Dict = type_sequence_label_size snake_case_ : Optional[Any] = encoder_stride snake_case_ : str = out_features snake_case_ : List[str] = out_indices def _A ( self :int ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : List[str] = None if self.use_labels: snake_case_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : Dict = self.get_config() return config, pixel_values, labels def _A ( self :str ) -> Optional[int]: '''simple docstring''' return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , 
num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _A ( self :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]: '''simple docstring''' snake_case_ : int = FocalNetModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : int = model(lowerCAmelCase__ ) snake_case_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _A ( self :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Any ) -> int: '''simple docstring''' snake_case_ : int = FocalNetBackbone(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : Optional[int] = model(lowerCAmelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None snake_case_ : str = None snake_case_ : Optional[int] = FocalNetBackbone(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : Any = model(lowerCAmelCase__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _A ( self :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int ) -> Tuple: '''simple docstring''' snake_case_ : Any = FocalNetForMaskedImageModeling(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : str = model(lowerCAmelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ : Optional[Any] = 1 snake_case_ : int = FocalNetForMaskedImageModeling(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : Tuple = model(lowerCAmelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.type_sequence_label_size 
snake_case_ : Any = FocalNetForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : Optional[int] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case_ : Tuple = 1 snake_case_ : List[str] = FocalNetForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : int = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _A ( self :List[Any] ) -> int: '''simple docstring''' snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_, snake_case_, snake_case_ : List[str] = config_and_inputs snake_case_ : Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) a__ = ( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def _A ( self :Any ) -> Any: '''simple docstring''' snake_case_ : Union[str, Any] = FocalNetModelTester(self ) snake_case_ : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , embed_dim=37 , has_text_modality=lowerCAmelCase__ ) def _A ( self :Dict ) -> Dict: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _A ( self :List[Any] ) -> Any: '''simple docstring''' return def _A ( self :Optional[int] ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _A ( self :List[Any] ) -> Tuple: '''simple docstring''' snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCAmelCase__ ) def _A ( self :str ) -> Tuple: '''simple docstring''' snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ ) def _A ( self :Any ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @unittest.skip(reason="FocalNet does not use inputs_embeds" ) def _A ( self :Tuple ) -> Any: '''simple docstring''' pass @unittest.skip(reason="FocalNet does not use feedforward chunking" ) def _A ( self :Any ) -> List[str]: '''simple docstring''' pass def _A ( self :Tuple ) -> List[Any]: '''simple docstring''' snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: snake_case_ : str = model_class(lowerCAmelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) 
snake_case_ : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) ) def _A ( self :List[Any] ) -> Optional[Any]: '''simple docstring''' snake_case_, snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: snake_case_ : int = model_class(lowerCAmelCase__ ) snake_case_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Optional[int] = [*signature.parameters.keys()] snake_case_ : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def _A ( self :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ : int = outputs.hidden_states snake_case_ : Dict = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) # FocalNet has a different seq_length snake_case_ : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) snake_case_ : List[str] = outputs.reshaped_hidden_states self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) snake_case_, snake_case_, snake_case_, snake_case_ : Union[str, Any] = reshaped_hidden_states[0].shape snake_case_ : Union[str, Any] = ( reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _A ( self :Optional[int] ) -> Dict: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: snake_case_ : Tuple = True self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : Tuple = True self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def _A ( self :Optional[Any] ) -> Dict: '''simple docstring''' snake_case_, snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[Any] = 3 snake_case_ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ : Optional[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % 
patch_size[0]) snake_case_ : List[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: snake_case_ : Union[str, Any] = True self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : Union[str, Any] = True self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) ) @slow def _A ( self :Optional[Any] ) -> str: '''simple docstring''' for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Any = FocalNetModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> Tuple: '''simple docstring''' snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Tuple = _config_zero_init(lowerCAmelCase__ ) for model_class in self.all_model_classes: snake_case_ : str = model_class(config=lowerCAmelCase__ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class A_ (unittest.TestCase ): """simple docstring""" @cached_property def _A ( self :Tuple ) -> int: '''simple docstring''' return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None @slow def _A ( self :str ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(lowerCAmelCase__ ) snake_case_ : str = self.default_image_processor snake_case_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) snake_case_ : Any = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): snake_case_ : str = model(**lowerCAmelCase__ ) # verify the logits snake_case_ : List[str] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) snake_case_ : Tuple = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) ) self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = (FocalNetBackbone,) if is_torch_available() else () a__ = FocalNetConfig a__ = False def _A ( self :Any ) -> List[Any]: '''simple docstring''' snake_case_ : Any = FocalNetModelTester(self )
656
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leave it or pass -1 and the count
    # will be random, between 10 and 10010
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; leave it or pass -1 and the count
    # will be random, between 10 and 10010
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
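A brief usage sketch for the DirectedGraph class above. The example edges and printed orders are illustrative assumptions, not part of the original file; note that add_pair stores edges as [weight, vertex] pairs, which is why the traversals read node[1].

# Hypothetical demo: a small DAG, its traversals, and cycle detection.
g = DirectedGraph()
g.add_pair(0, 1)
g.add_pair(0, 2)
g.add_pair(1, 3)
g.add_pair(2, 3)

print(g.all_nodes())         # [0, 1, 2, 3]
print(g.dfs())               # depth-first order from the first inserted node, e.g. [0, 1, 3, 2]
print(g.topological_sort())  # nodes are appended as they are popped, so children come before parents
print(g.has_cycle())         # False for this DAG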
656
1
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
656
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset

from transformers import AutoTokenizer, HfArgumentParser


PATTERN = re.compile(r"\s+")


def get_hash(example):
    # hash of the whitespace-stripped file content (md5, not "mda")
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
656
1
def binomial_coefficient(n, r):
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
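A quick sanity check for the rolling-row update above. Iterating j downward is what makes the in-place Pascal's-triangle update correct: c[j - 1] still holds the previous row's value when it is added. The check against Python's built-in math.comb is illustrative, not part of the original script.

import math

# Hypothetical verification over small inputs.
for n in range(1, 12):
    for r in range(n + 1):
        assert binomial_coefficient(n=n, r=r) == math.comb(n, r)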
656
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
    author    = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
    title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},
    booktitle = {Proceedings of the Fifth Conference on Machine Translation},
    month     = {November},
    year      = {2020},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train
Machine Translation metrics that achieve high levels of correlation with different types of human judgments
(HTER, DA's or MQM). With the release of the framework the authors also released fully trained models that were
used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da')  # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
656
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
656
1
def actual_power(a: int, b: int):
    # divide and conquer: split the exponent in half at every step
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        # mirror the exponent so the recursion always sees b >= 0
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
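One thing worth noting about actual_power above: it makes two recursive calls on the halved exponent instead of squaring a single half-result, so it performs O(b) multiplications rather than the O(log b) of true exponentiation by squaring. A minimal check against the built-in operator (an illustrative test, not part of the original file):

# Hypothetical sanity check over a few bases and exponents, including negatives.
for base in (-3, -2, 2, 3):
    for exp in range(-4, 5):
        assert abs(power(base, exp) - base**exp) < 1e-9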
656
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
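As a usage sketch, the configuration above can be instantiated directly and handed to the matching model class; CvtModel lives elsewhere in the transformers library and the override values below are illustrative, not defaults being changed in this file.

from transformers import CvtConfig, CvtModel

# Build a config (per-stage lists: one entry per CvT stage), then a fresh model.
config = CvtConfig(depth=[1, 2, 10], drop_path_rate=[0.0, 0.0, 0.2])
model = CvtModel(config)  # randomly initialized three-stage CvT
print(config.embed_dim)  # [64, 192, 384]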
656
1
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation from sentences before scoring. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in
        case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       normalized=True,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
656
1
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        # Validation
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        # Validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")

        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")

        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
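For reference, sherman_morrison above implements the rank-one update identity, with the receiving matrix playing the role of A^{-1}; writing it out makes the zero check on numerator_factor easier to follow (a sketch of the math, not part of the original file):

    (A + u v^T)^{-1} = A^{-1} - \frac{A^{-1} u \, v^T A^{-1}}{1 + v^T A^{-1} u}

The method returns None exactly when 1 + v^T A^{-1} u = 0, which is the case where A + u v^T is singular and the update has no inverse.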
656
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
656
1
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
    title = {Optimizing Statistical Machine Translation for Text Simplification},
    authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
    journal = {Transactions of the Association for Computational Linguistics},
    volume = {4},
    year={2016},
    url = {https://www.aclweb.org/anthology/Q16-1029},
    pages = {401--415},
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3

    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
656
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
656
1
'''simple docstring''' import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser __lowerCamelCase : List[str] = re.compile(R'''\s+''') def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]: """simple docstring""" return {"hash": hashlib.mda(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()} def __UpperCAmelCase ( __magic_name__ )-> str: """simple docstring""" snake_case_ : Optional[Any] = [len(__magic_name__ ) for line in example["content"].splitlines()] return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )} def __UpperCAmelCase ( __magic_name__ )-> int: """simple docstring""" snake_case_ : Optional[int] = np.mean([c.isalnum() for c in example["content"]] ) return {"alpha_frac": alpha_frac} def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple: """simple docstring""" if example["hash"] in uniques: uniques.remove(example["hash"] ) return True else: return False def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> Tuple: """simple docstring""" snake_case_ : List[str] = ["auto-generated", "autogenerated", "automatically generated"] snake_case_ : Optional[Any] = example["content"].splitlines() for _, line in zip(range(__magic_name__ ) ,__magic_name__ ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> Optional[Any]: """simple docstring""" snake_case_ : str = ["unit tests", "test file", "configuration file"] snake_case_ : int = example["content"].splitlines() snake_case_ : Optional[Any] = 0 snake_case_ : Any = 0 # first test for _, line in zip(range(__magic_name__ ) ,__magic_name__ ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test snake_case_ : Tuple = example["content"].count("\n" ) snake_case_ : int = int(coeff * nlines ) for line in lines: count_config += line.lower().count("config" ) count_test += line.lower().count("test" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def __UpperCAmelCase ( __magic_name__ )-> str: """simple docstring""" snake_case_ : List[Any] = ["def ", "class ", "for ", "while "] snake_case_ : Optional[Any] = example["content"].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]: """simple docstring""" snake_case_ : Tuple = example["content"].splitlines() snake_case_ : Tuple = 0 for line in lines: counter += line.lower().count("=" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def __UpperCAmelCase ( __magic_name__ )-> List[Any]: """simple docstring""" snake_case_ : Tuple = tokenizer(example["content"] ,truncation=__magic_name__ )["input_ids"] snake_case_ : int = len(example["content"] ) / len(__magic_name__ ) return {"ratio": ratio} def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]: """simple docstring""" snake_case_ : Union[str, Any] = {} results.update(get_hash(__magic_name__ ) ) 
results.update(line_stats(__magic_name__ ) ) results.update(alpha_stats(__magic_name__ ) ) results.update(char_token_ratio(__magic_name__ ) ) results.update(is_autogenerated(__magic_name__ ) ) results.update(is_config_or_test(__magic_name__ ) ) results.update(has_no_keywords(__magic_name__ ) ) results.update(has_few_assignments(__magic_name__ ) ) return results def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple: """simple docstring""" if not check_uniques(__magic_name__ ,__magic_name__ ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def __UpperCAmelCase ( __magic_name__ )-> Dict: """simple docstring""" with open(__magic_name__ ,"rb" ) as f_in: with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out: shutil.copyfileobj(__magic_name__ ,__magic_name__ ) os.unlink(__magic_name__ ) # Settings __lowerCamelCase : List[Any] = HfArgumentParser(PreprocessingArguments) __lowerCamelCase : str = parser.parse_args() if args.num_workers is None: __lowerCamelCase : List[Any] = multiprocessing.cpu_count() __lowerCamelCase : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset __lowerCamelCase : Any = time.time() __lowerCamelCase : str = load_dataset(args.dataset_name, split='''train''') print(f'''Time to load dataset: {time.time()-t_start:.2f}''') # Run preprocessing __lowerCamelCase : List[str] = time.time() __lowerCamelCase : Any = ds.map(preprocess, num_proc=args.num_workers) print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''') # Deduplicate hashes __lowerCamelCase : Any = set(ds.unique('''hash''')) __lowerCamelCase : Optional[int] = len(uniques) / len(ds) print(f'''Fraction of duplicates: {1-frac:.2%}''') # Deduplicate data and apply heuristics __lowerCamelCase : List[str] = time.time() __lowerCamelCase : Tuple = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(f'''Time to filter dataset: {time.time()-t_start:.2f}''') print(f'''Size of filtered dataset: {len(ds_filter)}''') # Deduplicate with minhash and jaccard similarity if args.near_deduplication: __lowerCamelCase : List[str] = time.time() __lowerCamelCase , __lowerCamelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''') print(f'''Size of deduplicate dataset: {len(ds_filter)}''') # Save data in batches of samples_per_file __lowerCamelCase : List[Any] = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) __lowerCamelCase : List[str] = output_dir / '''data''' data_dir.mkdir(exist_ok=True) __lowerCamelCase : int = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): __lowerCamelCase : Union[str, Any] = str(data_dir / f'''file-{file_number+1:012}.json''') __lowerCamelCase : List[Any] = 
min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
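A toy, self-contained illustration of the exact-deduplication step used above: hash the whitespace-stripped content and keep only the first occurrence of each hash (the sample strings are made up):

import hashlib
import re

PATTERN = re.compile(r"\s+")

def content_hash(text: str) -> str:
    # same idea as get_hash above: strip all whitespace, then hash the bytes
    return hashlib.md5(PATTERN.sub("", text).encode("utf-8")).hexdigest()

samples = ["def f():\n    return 1", "def f():  return 1", "def g(): pass"]
seen, unique = set(), []
for sample in samples:
    digest = content_hash(sample)
    if digest not in seen:  # mirrors check_uniques: only the first copy survives
        seen.add(digest)
        unique.append(sample)
print(len(unique))  # 2 -- the first two samples differ only in whitespace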
656
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ )-> list[int]: """simple docstring""" if not isinstance(__magic_name__ ,__magic_name__ ) or length <= 0: raise ValueError("Length must be a positive integer." ) return [n * (2 * n - 1) for n in range(__magic_name__ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
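A quick sanity check on the closed form used above: the n-th hexagonal number n(2n - 1) equals the (2n - 1)-th triangular number, so every hexagonal number is triangular:

triangular = lambda k: k * (k + 1) // 2
assert all(n * (2 * n - 1) == triangular(2 * n - 1) for n in range(1, 1000))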
656
1
'''simple docstring''' import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput __lowerCamelCase : List[str] = '''scheduler_config.json''' class A_ (a_ ): """simple docstring""" a__ = 1 a__ = 2 a__ = 3 a__ = 4 a__ = 5 @dataclass class A_ (a_ ): """simple docstring""" a__ = 42 class A_ : """simple docstring""" a__ = SCHEDULER_CONFIG_NAME a__ = ['''dtype'''] a__ = [] a__ = True @classmethod def _A ( cls :Dict , lowerCAmelCase__ :Dict[str, Any] = None , lowerCAmelCase__ :Optional[str] = None , lowerCAmelCase__ :Tuple=False , **lowerCAmelCase__ :Optional[int] , ) -> Dict: '''simple docstring''' snake_case_, snake_case_ : int = cls.load_config( pretrained_model_name_or_path=lowerCAmelCase__ , subfolder=lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ , **lowerCAmelCase__ , ) snake_case_, snake_case_ : Optional[int] = cls.from_config(lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ , **lowerCAmelCase__ ) if hasattr(lowerCAmelCase__ , "create_state" ) and getattr(lowerCAmelCase__ , "has_state" , lowerCAmelCase__ ): snake_case_ : Optional[Any] = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def _A ( self :Dict , lowerCAmelCase__ :Union[str, os.PathLike] , lowerCAmelCase__ :bool = False , **lowerCAmelCase__ :int ) -> Any: '''simple docstring''' self.save_config(save_directory=lowerCAmelCase__ , push_to_hub=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def _A ( self :Optional[int] ) -> Optional[int]: '''simple docstring''' return self._get_compatibles() @classmethod def _A ( cls :Union[str, Any] ) -> Tuple: '''simple docstring''' snake_case_ : Any = list(set([cls.__name__] + cls._compatibles ) ) snake_case_ : int = importlib.import_module(__name__.split("." )[0] ) snake_case_ : Union[str, Any] = [ getattr(lowerCAmelCase__ , lowerCAmelCase__ ) for c in compatible_classes_str if hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) ] return compatible_classes def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> jnp.ndarray: """simple docstring""" assert len(__magic_name__ ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(__magic_name__ ) - x.ndim) ) ,__magic_name__ ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__=0.999 ,__magic_name__=jnp.floataa )-> jnp.ndarray: """simple docstring""" def alpha_bar(__magic_name__ ): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2 snake_case_ : str = [] for i in range(__magic_name__ ): snake_case_ : Optional[Any] = i / num_diffusion_timesteps snake_case_ : List[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(__magic_name__ ) / alpha_bar(__magic_name__ ) ,__magic_name__ ) ) return jnp.array(__magic_name__ ,dtype=__magic_name__ ) @flax.struct.dataclass class A_ : """simple docstring""" a__ = 42 a__ = 42 a__ = 42 @classmethod def _A ( cls :str , lowerCAmelCase__ :Tuple ) -> Dict: '''simple docstring''' snake_case_ : List[Any] = scheduler.config if config.trained_betas is not None: snake_case_ : List[Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype ) elif config.beta_schedule == "linear": snake_case_ : Optional[Any] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
snake_case_ : Union[str, Any] = ( jnp.linspace( config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule snake_case_ : Any = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype ) else: raise NotImplementedError( F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' ) snake_case_ : List[Any] = 1.0 - betas snake_case_ : Any = jnp.cumprod(lowerCAmelCase__ , axis=0 ) return cls( alphas=lowerCAmelCase__ , betas=lowerCAmelCase__ , alphas_cumprod=lowerCAmelCase__ , ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Any: """simple docstring""" snake_case_ : int = state.alphas_cumprod snake_case_ : Optional[int] = alphas_cumprod[timesteps] ** 0.5 snake_case_ : Dict = sqrt_alpha_prod.flatten() snake_case_ : str = broadcast_to_shape_from_left(__magic_name__ ,original_samples.shape ) snake_case_ : List[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 snake_case_ : int = sqrt_one_minus_alpha_prod.flatten() snake_case_ : List[Any] = broadcast_to_shape_from_left(__magic_name__ ,original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> int: """simple docstring""" snake_case_, snake_case_ : Union[str, Any] = get_sqrt_alpha_prod(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ) snake_case_ : List[Any] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Union[str, Any]: """simple docstring""" snake_case_, snake_case_ : Tuple = get_sqrt_alpha_prod(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ) snake_case_ : Optional[Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
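The left-broadcast helper above is what lets one per-sample scalar (for example an alphas_cumprod entry) scale a whole batch of image tensors. A minimal sketch of its effect, assuming jax is installed (shapes are illustrative):

import jax.numpy as jnp

def broadcast_left(x, shape):
    # pad trailing singleton axes, then broadcast: (B,) -> (B, 1, 1, 1) -> shape
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)

coeff = jnp.array([0.1, 0.9])    # one coefficient per batch element
batch = jnp.ones((2, 3, 8, 8))   # (batch, channels, height, width)
print(broadcast_left(coeff, batch.shape).shape)  # (2, 3, 8, 8)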
656
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __UpperCAmelCase ( __magic_name__=None )-> List[str]: """simple docstring""" if subparsers is not None: snake_case_ : List[str] = subparsers.add_parser("test" ) else: snake_case_ : List[Any] = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" ,default=__magic_name__ ,help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) ,) if subparsers is not None: parser.set_defaults(func=__magic_name__ ) return parser def __UpperCAmelCase ( __magic_name__ )-> Tuple: """simple docstring""" snake_case_ : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: snake_case_ : str = script_name else: snake_case_ : Any = F'''--config_file={args.config_file} {script_name}''' snake_case_ : Union[str, Any] = ["accelerate-launch"] + test_args.split() snake_case_ : Optional[int] = execute_subprocess_async(__magic_name__ ,env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! You are ready for your distributed training!" ) def __UpperCAmelCase ( )-> int: """simple docstring""" snake_case_ : Dict = test_command_parser() snake_case_ : Dict = parser.parse_args() test_command(__magic_name__ ) if __name__ == "__main__": main()
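End to end, the command above reduces to the following calls. The helper names test_command_parser and test_command are assumptions read off the calls inside main(), since the renaming in this file hides the real def names; the config path is illustrative:

parser = test_command_parser()
args = parser.parse_args(["--config_file", "default_config.yaml"])  # hypothetical path
test_command(args)  # shells out to accelerate-launch on test_utils/scripts/test_script.py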
656
1
'''simple docstring''' import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename __lowerCamelCase : List[Any] = '''http://www.mocksite.com/file1.txt''' __lowerCamelCase : Dict = '''"text": ["foo", "foo"]''' __lowerCamelCase : str = '''6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8''' class A_ : """simple docstring""" a__ = 200 a__ = {'''Content-Length''': '''100'''} a__ = {} def _A ( self :Dict , **lowerCAmelCase__ :Optional[int] ) -> List[Any]: '''simple docstring''' return [bytes(lowerCAmelCase__ , "utf-8" )] def __UpperCAmelCase ( *__magic_name__ ,**__magic_name__ )-> str: """simple docstring""" return MockResponse() @pytest.mark.parametrize("urls_type" ,[str, list, dict] ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict: """simple docstring""" import requests monkeypatch.setattr(__magic_name__ ,"request" ,__magic_name__ ) snake_case_ : Any = URL if issubclass(__magic_name__ ,__magic_name__ ): snake_case_ : Dict = url elif issubclass(__magic_name__ ,__magic_name__ ): snake_case_ : Optional[Any] = [url] elif issubclass(__magic_name__ ,__magic_name__ ): snake_case_ : Tuple = {"train": url} snake_case_ : Optional[Any] = "dummy" snake_case_ : Dict = "downloads" snake_case_ : int = tmp_path snake_case_ : List[str] = DownloadConfig( cache_dir=os.path.join(__magic_name__ ,__magic_name__ ) ,use_etag=__magic_name__ ,) snake_case_ : List[Any] = DownloadManager(dataset_name=__magic_name__ ,download_config=__magic_name__ ) snake_case_ : Any = dl_manager.download(__magic_name__ ) snake_case_ : int = urls for downloaded_paths in [downloaded_paths]: if isinstance(__magic_name__ ,__magic_name__ ): snake_case_ : int = [downloaded_paths] snake_case_ : int = [urls] elif isinstance(__magic_name__ ,__magic_name__ ): assert "train" in downloaded_paths.keys() snake_case_ : Tuple = downloaded_paths.values() snake_case_ : List[str] = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(__magic_name__ ,__magic_name__ ): assert downloaded_path == dl_manager.downloaded_paths[input_url] snake_case_ : Union[str, Any] = Path(__magic_name__ ) snake_case_ : Tuple = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() snake_case_ : List[Any] = downloaded_path.read_text() assert content == CONTENT snake_case_ : Tuple = downloaded_path.with_suffix(".json" ) assert metadata_downloaded_path.exists() snake_case_ : str = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize("paths_type" ,[str, list, dict] ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict: """simple docstring""" snake_case_ : Tuple = str(__magic_name__ ) if issubclass(__magic_name__ ,__magic_name__ ): snake_case_ : Optional[Any] = filename elif issubclass(__magic_name__ ,__magic_name__ ): snake_case_ : str = [filename] elif issubclass(__magic_name__ ,__magic_name__ ): snake_case_ : List[Any] = {"train": filename} snake_case_ : str = "dummy" snake_case_ : int = xz_file.parent snake_case_ : str = "extracted" snake_case_ : Optional[Any] = DownloadConfig( cache_dir=__magic_name__ ,use_etag=__magic_name__ ,) snake_case_ : str = DownloadManager(dataset_name=__magic_name__ ,download_config=__magic_name__ ) snake_case_ : int = 
dl_manager.extract(__magic_name__ ) snake_case_ : int = paths for extracted_paths in [extracted_paths]: if isinstance(__magic_name__ ,__magic_name__ ): snake_case_ : List[str] = [extracted_paths] snake_case_ : int = [paths] elif isinstance(__magic_name__ ,__magic_name__ ): assert "train" in extracted_paths.keys() snake_case_ : List[Any] = extracted_paths.values() snake_case_ : Optional[Any] = paths.values() assert extracted_paths for extracted_path, input_path in zip(__magic_name__ ,__magic_name__ ): assert extracted_path == dl_manager.extracted_paths[input_path] snake_case_ : str = Path(__magic_name__ ) snake_case_ : int = extracted_path.parts assert parts[-1] == hash_url_to_filename(__magic_name__ ,etag=__magic_name__ ) assert parts[-2] == extracted_subdir assert extracted_path.exists() snake_case_ : Union[str, Any] = extracted_path.read_text() snake_case_ : Dict = text_file.read_text() assert extracted_file_content == expected_file_content def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]: """simple docstring""" assert path.endswith(".jsonl" ) for num_items, line in enumerate(__magic_name__ ,start=1 ): snake_case_ : Union[str, Any] = json.loads(line.decode("utf-8" ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize("archive_jsonl" ,["tar_jsonl_path", "zip_jsonl_path"] ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[Any]: """simple docstring""" snake_case_ : List[str] = request.getfixturevalue(__magic_name__ ) snake_case_ : Dict = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__magic_name__ ) ,start=1 ): _test_jsonl(__magic_name__ ,__magic_name__ ) assert num_jsonl == 2 @pytest.mark.parametrize("archive_nested_jsonl" ,["tar_nested_jsonl_path", "zip_nested_jsonl_path"] ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Any: """simple docstring""" snake_case_ : Optional[Any] = request.getfixturevalue(__magic_name__ ) snake_case_ : List[str] = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__magic_name__ ) ,start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__magic_name__ ) ,start=1 ): _test_jsonl(__magic_name__ ,__magic_name__ ) assert num_tar == 1 assert num_jsonl == 2 def __UpperCAmelCase ( __magic_name__ )-> List[str]: """simple docstring""" snake_case_ : Union[str, Any] = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(__magic_name__ ) ,start=1 ): assert os.path.basename(__magic_name__ ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
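A condensed sketch of the DownloadManager flow these tests exercise: download with hash-based caching, then extract. The URL reuses the mock constant above; the cache directory is illustrative:

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager

config = DownloadConfig(cache_dir="./downloads", use_etag=False)  # illustrative cache dir
dl_manager = DownloadManager(dataset_name="dummy", download_config=config)
local_path = dl_manager.download("http://www.mocksite.com/file1.txt")  # cached under a hash_url_to_filename name
extracted = dl_manager.extract(local_path)  # unpacks archives; plain files pass straight through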
656
'''simple docstring''' from scipy.stats import spearmanr import datasets __lowerCamelCase : str = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' __lowerCamelCase : int = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' __lowerCamelCase : List[str] = R'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): """simple docstring""" def _A ( self :str ) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float" ), "references": datasets.Value("float" ), } ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=False ) -> List[Any]: '''simple docstring''' snake_case_ : Optional[Any] = spearmanr(lowerCAmelCase__ , lowerCAmelCase__ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
656
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __lowerCamelCase : List[Any] = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor'''] __lowerCamelCase : Union[str, Any] = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config from .processing_layoutlmv2 import LayoutLMv2Processor from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmv2 import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMv2ForQuestionAnswering, LayoutLMv2ForSequenceClassification, LayoutLMv2ForTokenClassification, LayoutLMv2Layer, LayoutLMv2Model, LayoutLMv2PreTrainedModel, ) else: import sys __lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
656
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __lowerCamelCase : str = 128022 __lowerCamelCase : List[Any] = 128028 @require_sentencepiece class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MaMaaaTokenizer a__ = False a__ = False a__ = True def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' super().setUp() snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Optional[int] = Path(self.tmpdirname ) save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str: '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]: '''simple docstring''' return ( "This is a test", "This is a test", ) def _A ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = "</s>" snake_case_ : Union[str, Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : Any = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' pass def _A ( self :Optional[int] ) -> int: '''simple docstring''' snake_case_ : int = self.get_tokenizer() snake_case_ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , ) snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , "This is a test" ) @slow def _A ( self :Any ) -> List[Any]: '''simple docstring''' snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch 
@require_sentencepiece @require_tokenizers class A_ (unittest.TestCase ): """simple docstring""" a__ = '''facebook/m2m100_418M''' a__ = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] a__ = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def _A ( cls :str ) -> int: '''simple docstring''' snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) snake_case_ : List[str] = 1 return cls def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 ) def _A ( self :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.tokenizer.get_vocab() self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ ) def _A ( self :Any ) -> Dict: '''simple docstring''' snake_case_ : List[str] = "en" snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2] # fmt: on snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = tempfile.mkdtemp() snake_case_ : int = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(lowerCAmelCase__ ) snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ ) @require_torch def _A ( self :Optional[Any] ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = "en" snake_case_ : Tuple = "fr" snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" ) snake_case_ : Dict = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: snake_case_ : str = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : 
List[str] = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) snake_case_ : int = "zh" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def _A ( self :str ) -> int: '''simple docstring''' snake_case_ : Dict = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) snake_case_ : Tuple = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # en_XX, A, test, EOS "input_ids": [[128_022, 58, 4_183, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 128_006, } , )
656
1
'''simple docstring''' import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class A_ (a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = IFImgaImgSuperResolutionPipeline a__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} a__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) a__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def _A ( self :Tuple ) -> List[Any]: '''simple docstring''' return self._get_superresolution_dummy_components() def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any]=0 ) -> str: '''simple docstring''' if str(lowerCAmelCase__ ).startswith("mps" ): snake_case_ : Union[str, Any] = torch.manual_seed(lowerCAmelCase__ ) else: snake_case_ : Optional[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) snake_case_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) snake_case_ : List[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) snake_case_ : str = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _A ( self :Tuple ) -> List[Any]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def _A ( self :str ) -> Tuple: '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def _A ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def _A ( self :Dict ) -> Union[str, Any]: '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def _A ( self :Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' self._test_save_load_local() def _A ( self :Dict ) -> Tuple: '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
656
'''simple docstring''' import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __lowerCamelCase : str = '''.''' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) __lowerCamelCase : Tuple = [ '''Assert''', '''AssignVariableOp''', '''EmptyTensorList''', '''MergeV2Checkpoints''', '''ReadVariableOp''', '''ResourceGather''', '''RestoreV2''', '''SaveV2''', '''ShardedFilename''', '''StatefulPartitionedCall''', '''StaticRegexFullMatch''', '''VarHandleOp''', ] def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]: """simple docstring""" snake_case_ : Tuple = SavedModel() snake_case_ : Dict = [] with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f: snake_case_ : Dict = json.load(__magic_name__ )["opsets"] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(__magic_name__ )] ) with open(__magic_name__ ,"rb" ) as f: saved_model.ParseFromString(f.read() ) snake_case_ : Tuple = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want snake_case_ : str = sorted(__magic_name__ ) snake_case_ : Optional[int] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__magic_name__ ) if strict and len(__magic_name__ ) > 0: raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops) ) elif len(__magic_name__ ) > 0: print(F'''Found the following incompatible ops for the opset {opset}:''' ) print(*__magic_name__ ,sep="\n" ) else: print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": __lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''') parser.add_argument( '''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.''' ) parser.add_argument( '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Whether to make the checking strict (raise errors) or not (raise warnings)''' if False else '''Frameworks against which to test the saved model.''' ) parser.add_argument( '''--strict''', action='''store_true''', help='''Whether to make the checking strict (raise errors) or not (raise warnings)''' ) __lowerCamelCase : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
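Bypassing argparse, the same check is a single call; the onnx_compliancy name comes from the __main__ block above and the .pb path is a placeholder:

# Equivalent to: python <this script> --saved_model_path model/saved_model.pb --opset 12
onnx_compliancy("model/saved_model.pb", False, 12)  # positional (path, strict, opset), matching the __main__ call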
656
1
'''simple docstring''' import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef __lowerCamelCase : Dict = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Union[str, Any]: """simple docstring""" warnings.warn(__magic_name__ ,__magic_name__ ) requires_backends(__magic_name__ ,"sklearn" ) return (preds == labels).mean() def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict: """simple docstring""" warnings.warn(__magic_name__ ,__magic_name__ ) requires_backends(__magic_name__ ,"sklearn" ) snake_case_ : int = simple_accuracy(__magic_name__ ,__magic_name__ ) snake_case_ : Any = fa_score(y_true=__magic_name__ ,y_pred=__magic_name__ ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str: """simple docstring""" warnings.warn(__magic_name__ ,__magic_name__ ) requires_backends(__magic_name__ ,"sklearn" ) snake_case_ : Dict = pearsonr(__magic_name__ ,__magic_name__ )[0] snake_case_ : Dict = spearmanr(__magic_name__ ,__magic_name__ )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]: """simple docstring""" warnings.warn(__magic_name__ ,__magic_name__ ) requires_backends(__magic_name__ ,"sklearn" ) assert len(__magic_name__ ) == len(__magic_name__ ), F'''Predictions and labels have mismatched lengths {len(__magic_name__ )} and {len(__magic_name__ )}''' if task_name == "cola": return {"mcc": matthews_corrcoef(__magic_name__ ,__magic_name__ )} elif task_name == "sst-2": return {"acc": simple_accuracy(__magic_name__ ,__magic_name__ )} elif task_name == "mrpc": return acc_and_fa(__magic_name__ ,__magic_name__ ) elif task_name == "sts-b": return pearson_and_spearman(__magic_name__ ,__magic_name__ ) elif task_name == "qqp": return acc_and_fa(__magic_name__ ,__magic_name__ ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__magic_name__ ,__magic_name__ )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__magic_name__ ,__magic_name__ )} elif task_name == "qnli": return {"acc": simple_accuracy(__magic_name__ ,__magic_name__ )} elif task_name == "rte": return {"acc": simple_accuracy(__magic_name__ ,__magic_name__ )} elif task_name == "wnli": return {"acc": simple_accuracy(__magic_name__ ,__magic_name__ )} elif task_name == "hans": return {"acc": simple_accuracy(__magic_name__ ,__magic_name__ )} else: raise KeyError(__magic_name__ ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> str: """simple docstring""" warnings.warn(__magic_name__ ,__magic_name__ ) requires_backends(__magic_name__ ,"sklearn" ) if len(__magic_name__ ) != len(__magic_name__ ): raise ValueError(F'''Predictions and labels have mismatched lengths {len(__magic_name__ )} and {len(__magic_name__ )}''' ) if task_name == "xnli": return {"acc": simple_accuracy(__magic_name__ ,__magic_name__ )} else: raise KeyError(__magic_name__ )
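A worked instance of the acc-and-F1 combination computed above (the arrays are made up):

import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()              # 3 of 4 correct -> 0.75
f1 = f1_score(y_true=labels, y_pred=preds)  # precision 2/3, recall 1 -> 0.8
print(acc, f1, (acc + f1) / 2)              # 0.75 0.8 0.775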
656
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) __lowerCamelCase : List[str] = ['''names''', '''prefix'''] __lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] __lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines'''] __lowerCamelCase : Optional[Any] = ['''date_format'''] @dataclass class A_ (datasets.BuilderConfig ): """simple docstring""" a__ = "," a__ = None a__ = "infer" a__ = None a__ = None a__ = None a__ = None a__ = None a__ = True a__ = None a__ = None a__ = None a__ = None a__ = False a__ = None a__ = None a__ = None a__ = True a__ = True a__ = False a__ = True a__ = None a__ = "." a__ = None a__ = '"' a__ = 0 a__ = None a__ = None a__ = None a__ = None a__ = True a__ = True a__ = 0 a__ = True a__ = False a__ = None a__ = 10000 a__ = None a__ = "strict" a__ = "error" a__ = None def _A ( self :List[str] ) -> Any: '''simple docstring''' if self.delimiter is not None: snake_case_ : Tuple = self.delimiter if self.column_names is not None: snake_case_ : List[Any] = self.column_names @property def _A ( self :Optional[Any] ) -> int: '''simple docstring''' snake_case_ : Optional[int] = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del 
pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A_ (datasets.ArrowBasedBuilder ): """simple docstring""" a__ = CsvConfig def _A ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]: '''simple docstring''' if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): snake_case_ : int = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : List[str] = [files] snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] snake_case_ : str = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : str = [files] snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table: '''simple docstring''' if self.config.features is not None: snake_case_ : int = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str snake_case_ : str = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' ) raise
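In practice this builder is what backs load_dataset("csv", ...); every config field above is forwarded to pandas.read_csv. A minimal sketch with an illustrative file path:

from datasets import load_dataset

ds = load_dataset("csv", data_files={"train": "data/train.csv"}, sep=";", skiprows=1)
print(ds["train"].features)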
656
1
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> None: """simple docstring""" snake_case_ : List[Any] = len(__magic_name__ ) print("The following activities are selected:" ) # The first activity is always selected snake_case_ : str = 0 print(__magic_name__ ,end="," ) # Consider rest of the activities for j in range(__magic_name__ ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(__magic_name__ ,end="," ) snake_case_ : Optional[Any] = j if __name__ == "__main__": import doctest doctest.testmod() __lowerCamelCase : int = [1, 3, 0, 5, 8, 5] __lowerCamelCase : Any = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
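For the sample arrays above the greedy prints indices 0, 1, 3, 4 (finish times 2, 4, 7, 9), a maximum-size compatible set. Tracing it:

# start  = [1, 3, 0, 5, 8, 5]
# finish = [2, 4, 6, 7, 9, 9]   (already sorted by finish time, a precondition of this greedy)
# pick 0 (finish 2); 1 starts at 3 >= 2 -> pick; 2 starts at 0 < 4 -> skip;
# 3 starts at 5 >= 4 -> pick; 4 starts at 8 >= 7 -> pick; 5 starts at 5 < 9 -> skip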
656
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MgpstrTokenizer a__ = False a__ = {} a__ = False def _A ( self :List[str] ) -> List[str]: '''simple docstring''' super().setUp() # fmt: off snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str: '''simple docstring''' snake_case_ : Dict = "tester" snake_case_ : Tuple = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def _A ( self :Dict ) -> str: '''simple docstring''' pass def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : Tuple = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 1 ) snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) self.assertTrue(special_token not in decoded ) def _A ( self :int ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertNotEqual(len(lowerCAmelCase__ ) , 0 ) snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def _A ( self :Union[str, Any] ) -> Any: '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def _A ( self :int ) -> Dict: '''simple docstring''' pass
656
1
'''simple docstring'''

import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD


torch.set_grad_enabled(False)


def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Any:
    """simple docstring"""
    snake_case_ : Union[str, Any] = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append(
            (F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ] )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ] )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        snake_case_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )

    return rename_keys


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=False )-> List[str]:
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            snake_case_ : Dict = ""
        else:
            snake_case_ : Tuple = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        snake_case_ : Tuple = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
        snake_case_ : Tuple = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        snake_case_ : List[Any] = in_proj_weight[
            : config.hidden_size, :
        ]
        snake_case_ : str = in_proj_bias[: config.hidden_size]
        snake_case_ : Tuple = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        snake_case_ : str = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        snake_case_ : int = in_proj_weight[
            -config.hidden_size :, :
        ]
        snake_case_ : Optional[int] = in_proj_bias[-config.hidden_size :]


def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : Union[str, Any] = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(__magic_name__ ,__magic_name__ )


def __UpperCAmelCase ( __magic_name__ )-> Tuple:
    """simple docstring"""
    snake_case_ : str = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(__magic_name__ ,__magic_name__ )


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : Dict = dct.pop(__magic_name__ )
    snake_case_ : Optional[int] = val


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Union[str, Any]:
    """simple docstring"""
    snake_case_ : Tuple = ViTMSNConfig()
    snake_case_ : Optional[Any] = 1000

    snake_case_ : Any = "datasets/huggingface/label-files"
    snake_case_ : Tuple = "imagenet-1k-id2label.json"
    snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ) ,"r" ) )
    snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
    snake_case_ : Optional[int] = idalabel
    snake_case_ : Tuple = {v: k for k, v in idalabel.items()}

    if "s16" in checkpoint_url:
        snake_case_ : int = 384
        snake_case_ : Dict = 1536
        snake_case_ : int = 6
    elif "l16" in checkpoint_url:
        snake_case_ : List[Any] = 1024
        snake_case_ : Dict = 4096
        snake_case_ : Optional[Any] = 24
        snake_case_ : int = 16
        snake_case_ : Union[str, Any] = 0.1
    elif "b4" in checkpoint_url:
        snake_case_ : Dict = 4
    elif "l7" in checkpoint_url:
        snake_case_ : List[str] = 7
        snake_case_ : Tuple = 1024
        snake_case_ : Any = 4096
        snake_case_ : Optional[Any] = 24
        snake_case_ : Dict = 16
        snake_case_ : str = 0.1

    snake_case_ : Tuple = ViTMSNModel(__magic_name__ )

    snake_case_ : Tuple = torch.hub.load_state_dict_from_url(__magic_name__ ,map_location="cpu" )["target_encoder"]

    snake_case_ : str = ViTImageProcessor(size=config.image_size )

    remove_projection_head(__magic_name__ )
    snake_case_ : Optional[Any] = create_rename_keys(__magic_name__ ,base_model=__magic_name__ )

    for src, dest in rename_keys:
        rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
    read_in_q_k_v(__magic_name__ ,__magic_name__ ,base_model=__magic_name__ )

    model.load_state_dict(__magic_name__ )
    model.eval()

    snake_case_ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"

    snake_case_ : Union[str, Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
    snake_case_ : Optional[Any] = ViTImageProcessor(
        size=config.image_size ,image_mean=__magic_name__ ,image_std=__magic_name__ )
    snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" )

    # forward pass
    torch.manual_seed(2 )
    snake_case_ : Dict = model(**__magic_name__ )
    snake_case_ : Optional[int] = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        snake_case_ : int = torch.tensor([[-1.0_915, -1.4_876, -1.1_809]] )
    elif "b16" in checkpoint_url:
        snake_case_ : Optional[Any] = torch.tensor([[14.2_889, -18.9_045, 11.7_281]] )
    elif "l16" in checkpoint_url:
        snake_case_ : int = torch.tensor([[41.5_028, -22.8_681, 45.6_475]] )
    elif "b4" in checkpoint_url:
        snake_case_ : Tuple = torch.tensor([[-4.3_868, 5.2_932, -0.4_137]] )
    else:
        snake_case_ : Any = torch.tensor([[-0.1_792, -0.6_465, 2.4_263]] )

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] ,__magic_name__ ,atol=1E-4 )

    print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__magic_name__ )

    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(__magic_name__ )


if __name__ == "__main__":
    __lowerCamelCase : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
        type=str,
        help='''URL of the checkpoint you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )

    __lowerCamelCase : Any = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
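# Illustrative invocation (flags taken from the argparse block above; the script
# file name is an assumption, not stated in the source):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small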
656
'''simple docstring'''

from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a, input_b) -> float:
    """simple docstring"""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset, value_array) -> list[list[list[float] | float]]:
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            F'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                F'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            F'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a, input_b) -> float:
    """simple docstring"""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
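# Illustrative usage (added note; the values are chosen for the example, not
# taken from the file):
#   dataset = np.array([[0, 0, 0], [1, 1, 1]])
#   value_array = np.array([[0, 0, 1]])
#   similarity_search(dataset, value_array)  # -> [[[0, 0, 0], 1.0]]
# Each query vector is returned with its nearest dataset vector and the
# euclidean distance between them.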
656
1
'''simple docstring'''

import argparse
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
__lowerCamelCase : Any = logging.get_logger(__name__)


def __UpperCAmelCase ( __magic_name__ )-> Tuple:
    """simple docstring"""
    snake_case_ : Union[str, Any] = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder" ):
            snake_case_ : List[str] = key.replace("module.encoder" ,"glpn.encoder" )
        if key.startswith("module.decoder" ):
            snake_case_ : int = key.replace("module.decoder" ,"decoder.stages" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            snake_case_ : Union[str, Any] = key[key.find("patch_embed" ) + len("patch_embed" )]
            snake_case_ : Dict = key.replace(F'''patch_embed{idx}''' ,F'''patch_embeddings.{int(__magic_name__ )-1}''' )
        if "norm" in key:
            snake_case_ : Dict = key.replace("norm" ,"layer_norm" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            snake_case_ : int = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
            snake_case_ : Tuple = key.replace(F'''layer_norm{idx}''' ,F'''layer_norm.{int(__magic_name__ )-1}''' )
        if "layer_norm1" in key:
            snake_case_ : Any = key.replace("layer_norm1" ,"layer_norm_1" )
        if "layer_norm2" in key:
            snake_case_ : Any = key.replace("layer_norm2" ,"layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            snake_case_ : str = key[key.find("block" ) + len("block" )]
            snake_case_ : Tuple = key.replace(F'''block{idx}''' ,F'''block.{int(__magic_name__ )-1}''' )
        if "attn.q" in key:
            snake_case_ : Any = key.replace("attn.q" ,"attention.self.query" )
        if "attn.proj" in key:
            snake_case_ : Tuple = key.replace("attn.proj" ,"attention.output.dense" )
        if "attn" in key:
            snake_case_ : Optional[int] = key.replace("attn" ,"attention.self" )
        if "fc1" in key:
            snake_case_ : Dict = key.replace("fc1" ,"dense1" )
        if "fc2" in key:
            snake_case_ : Any = key.replace("fc2" ,"dense2" )
        if "linear_pred" in key:
            snake_case_ : List[str] = key.replace("linear_pred" ,"classifier" )
        if "linear_fuse" in key:
            snake_case_ : Dict = key.replace("linear_fuse.conv" ,"linear_fuse" )
            snake_case_ : Any = key.replace("linear_fuse.bn" ,"batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            snake_case_ : Optional[Any] = key[key.find("linear_c" ) + len("linear_c" )]
            snake_case_ : Union[str, Any] = key.replace(F'''linear_c{idx}''' ,F'''linear_c.{int(__magic_name__ )-1}''' )
        if "bot_conv" in key:
            snake_case_ : Union[str, Any] = key.replace("bot_conv" ,"0.convolution" )
        if "skip_conv1" in key:
            snake_case_ : Any = key.replace("skip_conv1" ,"1.convolution" )
        if "skip_conv2" in key:
            snake_case_ : List[str] = key.replace("skip_conv2" ,"2.convolution" )
        if "fusion1" in key:
            snake_case_ : Union[str, Any] = key.replace("fusion1" ,"1.fusion" )
        if "fusion2" in key:
            snake_case_ : Optional[Any] = key.replace("fusion2" ,"2.fusion" )
        if "fusion3" in key:
            snake_case_ : int = key.replace("fusion3" ,"3.fusion" )
        if "fusion" in key and "conv" in key:
            snake_case_ : List[Any] = key.replace("conv" ,"convolutional_layer" )
        if key.startswith("module.last_layer_depth" ):
            snake_case_ : Optional[int] = key.replace("module.last_layer_depth" ,"head.head" )
        snake_case_ : List[str] = value

    return new_state_dict


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Tuple:
    """simple docstring"""
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            snake_case_ : Optional[int] = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            snake_case_ : Optional[Any] = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            snake_case_ : str = kv_weight[
                : config.hidden_sizes[i], :
            ]
            snake_case_ : Dict = kv_bias[: config.hidden_sizes[i]]
            snake_case_ : Optional[Any] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            snake_case_ : Any = kv_bias[config.hidden_sizes[i] :]


def __UpperCAmelCase ( )-> List[str]:
    """simple docstring"""
    snake_case_ : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
    snake_case_ : List[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )
    return image


@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__=False ,__magic_name__=None )-> List[str]:
    """simple docstring"""
    snake_case_ : str = GLPNConfig(hidden_sizes=[64, 128, 320, 512] ,decoder_hidden_size=64 ,depths=[3, 8, 27, 3] )

    # load image processor (only resize + rescale)
    snake_case_ : Optional[int] = GLPNImageProcessor()

    # prepare image
    snake_case_ : Any = prepare_img()
    snake_case_ : Any = image_processor(images=__magic_name__ ,return_tensors="pt" ).pixel_values

    logger.info("Converting model..." )

    # load original state dict
    snake_case_ : Optional[int] = torch.load(__magic_name__ ,map_location=torch.device("cpu" ) )

    # rename keys
    snake_case_ : List[str] = rename_keys(__magic_name__ )

    # key and value matrices need special treatment
    read_in_k_v(__magic_name__ ,__magic_name__ )

    # create HuggingFace model and load state dict
    snake_case_ : str = GLPNForDepthEstimation(__magic_name__ )
    model.load_state_dict(__magic_name__ )
    model.eval()

    # forward pass
    snake_case_ : Union[str, Any] = model(__magic_name__ )
    snake_case_ : int = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            snake_case_ : Dict = torch.tensor(
                [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
        elif "kitti" in model_name:
            snake_case_ : List[Any] = torch.tensor(
                [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
        else:
            raise ValueError(F'''Unknown model name: {model_name}''' )

        snake_case_ : Tuple = torch.Size([1, 480, 640] )

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3] ,__magic_name__ ,atol=1E-4 )
        print("Looks ok!" )

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(__magic_name__ ,__magic_name__ ) ,organization="nielsr" ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
        image_processor.push_to_hub(
            repo_path_or_name=Path(__magic_name__ ,__magic_name__ ) ,organization="nielsr" ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)


if __name__ == "__main__":
    __lowerCamelCase : List[str] = argparse.ArgumentParser()

    parser.add_argument(
        '''--checkpoint_path''',
        default=None,
        type=str,
        help='''Path to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
    )
    parser.add_argument(
        '''--model_name''',
        default='''glpn-kitti''',
        type=str,
        help='''Name of the model in case you\'re pushing to the hub.''',
    )

    __lowerCamelCase : str = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
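# Illustrative invocation (flags taken from the argparse block above; script and
# checkpoint file names are assumptions):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti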
656
'''simple docstring'''

import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """simple docstring"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
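# Illustrative CLI usage via python-fire (file names are examples, not from the
# source):
#   python rouge_cli.py predictions.txt gold.txt --save_path rouge.json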
656
1
'''simple docstring'''

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


__lowerCamelCase : Dict = logging.get_logger(__name__)

__lowerCamelCase : Any = '''▁'''

__lowerCamelCase : List[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}

__lowerCamelCase : Optional[int] = {
    '''vocab_file''': {
        '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
    }
}

__lowerCamelCase : Any = {
    '''facebook/xglm-564M''': 2048,
}


class A_ (a_ ):
    """simple docstring"""

    a__ = VOCAB_FILES_NAMES
    a__ = PRETRAINED_VOCAB_FILES_MAP
    a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ = ['''input_ids''', '''attention_mask''']

    def __init__( self :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int="<s>" , lowerCAmelCase__ :Dict="</s>" , lowerCAmelCase__ :str="</s>" , lowerCAmelCase__ :Union[str, Any]="<s>" , lowerCAmelCase__ :Optional[int]="<unk>" , lowerCAmelCase__ :Union[str, Any]="<pad>" , lowerCAmelCase__ :Optional[Dict[str, Any]] = None , **lowerCAmelCase__ :List[Any] , ) -> None:
        '''simple docstring'''
        snake_case_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        snake_case_ : Dict = 7
        snake_case_ : Dict = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]

        snake_case_ : Optional[Any] = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )

        snake_case_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(lowerCAmelCase__ ) )
        snake_case_ : int = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        snake_case_ : Optional[int] = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        snake_case_ : int = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        snake_case_ : Optional[int] = len(self.sp_model )
        snake_case_ : int = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
        self.fairseq_tokens_to_ids.update(lowerCAmelCase__ )
        snake_case_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self :Optional[int] ) -> List[Any]:
        '''simple docstring'''
        snake_case_ : Dict = self.__dict__.copy()
        snake_case_ : Tuple = None
        snake_case_ : List[str] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self :Dict , lowerCAmelCase__ :int ) -> int:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = d

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            snake_case_ : Dict = {}

        snake_case_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def _A ( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_a is None:
            return [self.sep_token_id] + token_ids_a
        snake_case_ : Optional[Any] = [self.sep_token_id]
        return sep + token_ids_a + sep + sep + token_ids_a

    def _A ( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )

        if token_ids_a is None:
            return [1] + ([0] * len(lowerCAmelCase__ ))
        return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ ))

    def _A ( self :Optional[int] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        snake_case_ : List[str] = [self.sep_token_id]

        if token_ids_a is None:
            return len(sep + token_ids_a ) * [0]
        return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]

    @property
    def _A ( self :Dict ) -> Optional[Any]:
        '''simple docstring'''
        return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words

    def _A ( self :Dict ) -> Optional[int]:
        '''simple docstring'''
        snake_case_ : Dict = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _A ( self :int , lowerCAmelCase__ :str ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )

    def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Tuple:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        snake_case_ : Dict = self.sp_model.PieceToId(lowerCAmelCase__ )

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _A ( self :List[str] , lowerCAmelCase__ :Tuple ) -> List[str]:
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = "".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , " " ).strip()
        return out_string

    def _A ( self :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(lowerCAmelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        snake_case_ : int = os.path.join(
            lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowerCAmelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCAmelCase__ , "wb" ) as fi:
                snake_case_ : Any = self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase__ )

        return (out_vocab_file,)
656
'''simple docstring'''

import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    ConditionalDetrConfig,
    ConditionalDetrForObjectDetection,
    ConditionalDetrForSegmentation,
    ConditionalDetrImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCamelCase : Optional[Any] = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
    )
    rename_keys.append(
        (
            f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
            f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
        )
    )
    rename_keys.append(
        (
            f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
            f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
        )
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
    )
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
    )
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
    rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
    rename_keys.append(
        (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ('''input_proj.weight''', '''input_projection.weight'''),
        ('''input_proj.bias''', '''input_projection.bias'''),
        ('''query_embed.weight''', '''query_position_embeddings.weight'''),
        ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
        ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
        ('''class_embed.weight''', '''class_labels_classifier.weight'''),
        ('''class_embed.bias''', '''class_labels_classifier.bias'''),
        ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
        ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
        ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
        ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
        ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
        ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
        ('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
        ('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
        ('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
        ('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
        ('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
        ('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
        ('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
        ('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
        ('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
        ('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
    ]
)


def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
    """simple docstring"""
    snake_case_ : Optional[Any] = state_dict.pop(__magic_name__ )
    snake_case_ : Any = val


def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : Any = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            snake_case_ : Optional[Any] = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
            snake_case_ : int = value
        else:
            snake_case_ : int = value

    return new_state_dict


def __UpperCAmelCase ( __magic_name__ ,__magic_name__=False )-> Optional[int]:
    """simple docstring"""
    snake_case_ : str = ""
    if is_panoptic:
        snake_case_ : Dict = "conditional_detr."

    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        snake_case_ : Any = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        snake_case_ : Optional[int] = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        snake_case_ : Tuple = in_proj_weight[:256, :]
        snake_case_ : List[Any] = in_proj_bias[:256]
        snake_case_ : Optional[Any] = in_proj_weight[256:512, :]
        snake_case_ : Optional[int] = in_proj_bias[256:512]
        snake_case_ : Optional[int] = in_proj_weight[-256:, :]
        snake_case_ : str = in_proj_bias[-256:]


def __UpperCAmelCase ( )-> Optional[Any]:
    """simple docstring"""
    snake_case_ : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
    snake_case_ : Optional[Any] = Image.open(requests.get(__magic_name__ ,stream=__magic_name__ ).raw )

    return im


@torch.no_grad()
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> List[str]:
    """simple docstring"""
    snake_case_ : Optional[Any] = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        snake_case_ : Optional[Any] = "resnet101"
    if "dc5" in model_name:
        snake_case_ : List[str] = True
    snake_case_ : Tuple = "panoptic" in model_name
    if is_panoptic:
        snake_case_ : List[Any] = 250
    else:
        snake_case_ : Optional[Any] = 91
        snake_case_ : Optional[int] = "huggingface/label-files"
        snake_case_ : Dict = "coco-detection-id2label.json"
        snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
        snake_case_ : Optional[int] = {int(__magic_name__ ): v for k, v in idalabel.items()}
        snake_case_ : int = idalabel
        snake_case_ : Dict = {v: k for k, v in idalabel.items()}

    # load image processor
    snake_case_ : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection"
    snake_case_ : str = ConditionalDetrImageProcessor(format=__magic_name__ )

    # prepare image
    snake_case_ : str = prepare_img()
    snake_case_ : int = image_processor(images=__magic_name__ ,return_tensors="pt" )
    snake_case_ : Union[str, Any] = encoding["pixel_values"]

    logger.info(F'''Converting model {model_name}...''' )

    # load original model from torch hub
    snake_case_ : Union[str, Any] = torch.hub.load("DeppMeng/ConditionalDETR" ,__magic_name__ ,pretrained=__magic_name__ ).eval()
    snake_case_ : Any = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            snake_case_ : Any = "conditional_detr." + src
        rename_key(__magic_name__ ,__magic_name__ ,__magic_name__ )
    snake_case_ : Tuple = rename_backbone_keys(__magic_name__ )
    # query, key and value matrices need special treatment
    read_in_q_k_v(__magic_name__ ,is_panoptic=__magic_name__ )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    snake_case_ : int = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr" )
                and not key.startswith("class_labels_classifier" )
                and not key.startswith("bbox_predictor" )
            ):
                snake_case_ : Any = state_dict.pop(__magic_name__ )
                snake_case_ : Optional[int] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                snake_case_ : Tuple = state_dict.pop(__magic_name__ )
                snake_case_ : Any = val
            elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
                continue
            else:
                snake_case_ : Union[str, Any] = state_dict.pop(__magic_name__ )
                snake_case_ : List[Any] = val
        else:
            if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
                snake_case_ : Any = state_dict.pop(__magic_name__ )
                snake_case_ : List[Any] = val
    # finally, create HuggingFace model and load state dict
    snake_case_ : Optional[int] = ConditionalDetrForSegmentation(__magic_name__ ) if is_panoptic else ConditionalDetrForObjectDetection(__magic_name__ )
    model.load_state_dict(__magic_name__ )
    model.eval()
    model.push_to_hub(repo_id=__magic_name__ ,organization="DepuMeng" ,commit_message="Add model" )
    # verify our conversion
    snake_case_ : Dict = conditional_detr(__magic_name__ )
    snake_case_ : Union[str, Any] = model(__magic_name__ )
    assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1E-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1E-4 )

    # Save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
    model.save_pretrained(__magic_name__ )
    image_processor.save_pretrained(__magic_name__ )


if __name__ == "__main__":
    __lowerCamelCase : Tuple = argparse.ArgumentParser()

    parser.add_argument(
        '''--model_name''',
        default='''conditional_detr_resnet50''',
        type=str,
        help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    __lowerCamelCase : int = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
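# Illustrative invocation (flags taken from the argparse block above; the script
# file name is an assumption):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 --pytorch_dump_folder_path ./cond-detr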
656
1
'''simple docstring'''

import inspect
import unittest

from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_MAPPING,
        SegformerForImageClassification,
        SegformerForSemanticSegmentation,
        SegformerModel,
    )
    from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import SegformerImageProcessor


class A_ (a_ ):
    """simple docstring"""

    def _A ( self :Any ) -> int:
        '''simple docstring'''
        snake_case_ : Optional[Any] = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(lowerCAmelCase__ , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(lowerCAmelCase__ , "num_attention_heads" ) )
        self.parent.assertTrue(hasattr(lowerCAmelCase__ , "num_encoder_blocks" ) )


class A_ :
    """simple docstring"""

    def __init__( self :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int=13 , lowerCAmelCase__ :int=64 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Dict=[2, 2, 2, 2] , lowerCAmelCase__ :Any=[8, 4, 2, 1] , lowerCAmelCase__ :Dict=[16, 32, 64, 128] , lowerCAmelCase__ :Union[str, Any]=[1, 4, 8, 16] , lowerCAmelCase__ :Dict=[1, 2, 4, 8] , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Any="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :Dict=0.0_2 , lowerCAmelCase__ :str=3 , lowerCAmelCase__ :Optional[int]=None , ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case_ : Optional[int] = parent
        snake_case_ : List[Any] = batch_size
        snake_case_ : List[Any] = image_size
        snake_case_ : Optional[Any] = num_channels
        snake_case_ : List[str] = num_encoder_blocks
        snake_case_ : Optional[int] = sr_ratios
        snake_case_ : Dict = depths
        snake_case_ : List[str] = hidden_sizes
        snake_case_ : Dict = downsampling_rates
        snake_case_ : Optional[int] = num_attention_heads
        snake_case_ : Optional[Any] = is_training
        snake_case_ : str = use_labels
        snake_case_ : List[str] = hidden_act
        snake_case_ : str = hidden_dropout_prob
        snake_case_ : str = attention_probs_dropout_prob
        snake_case_ : int = initializer_range
        snake_case_ : Union[str, Any] = num_labels
        snake_case_ : Dict = scope

    def _A ( self :Optional[int] ) -> Tuple:
        '''simple docstring'''
        snake_case_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        snake_case_ : Optional[int] = None
        if self.use_labels:
            snake_case_ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )

        snake_case_ : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    def _A ( self :int ) -> int:
        '''simple docstring'''
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Optional[int]:
        '''simple docstring'''
        snake_case_ : Any = SegformerModel(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : Tuple = model(lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    def _A ( self :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> Optional[int]:
        '''simple docstring'''
        snake_case_ : str = self.num_labels
        snake_case_ : int = SegformerForSemanticSegmentation(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : str = model(lowerCAmelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        snake_case_ : str = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )

    def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int ) -> Union[str, Any]:
        '''simple docstring'''
        snake_case_ : Dict = 1
        snake_case_ : Optional[int] = SegformerForSemanticSegmentation(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : Optional[Any] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowerCAmelCase__ )
        snake_case_ : List[str] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
        self.parent.assertGreater(result.loss , 0.0 )

    def _A ( self :Optional[int] ) -> List[str]:
        '''simple docstring'''
        snake_case_ : List[str] = self.prepare_config_and_inputs()
        snake_case_, snake_case_, snake_case_ = config_and_inputs
        snake_case_ : Tuple = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
    """simple docstring"""

    a__ = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    a__ = (
        {
            '''feature-extraction''': SegformerModel,
            '''image-classification''': SegformerForImageClassification,
            '''image-segmentation''': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    a__ = True
    a__ = False
    a__ = False
    a__ = False

    def _A ( self :List[str] ) -> int:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = SegformerModelTester(self )
        snake_case_ : Union[str, Any] = SegformerConfigTester(self , config_class=lowerCAmelCase__ )

    def _A ( self :Tuple ) -> int:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def _A ( self :str ) -> str:
        '''simple docstring'''
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def _A ( self :List[str] ) -> str:
        '''simple docstring'''
        snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCAmelCase__ )

    def _A ( self :Union[str, Any] ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*lowerCAmelCase__ )

    @unittest.skip("SegFormer does not use inputs_embeds" )
    def _A ( self :Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
    def _A ( self :Optional[int] ) -> Tuple:
        '''simple docstring'''
        pass

    def _A ( self :Tuple ) -> Optional[int]:
        '''simple docstring'''
        snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            snake_case_ : List[Any] = model_class(lowerCAmelCase__ )
            snake_case_ : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : int = [*signature.parameters.keys()]

            snake_case_ : List[Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )

    def _A ( self :List[str] ) -> Optional[int]:
        '''simple docstring'''
        snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Optional[int] = True

        for model_class in self.all_model_classes:
            snake_case_ : Dict = True
            snake_case_ : List[Any] = False
            snake_case_ : Optional[int] = True
            snake_case_ : Optional[int] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            snake_case_ : Dict = outputs.attentions

            snake_case_ : List[str] = sum(self.model_tester.depths )
            self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            snake_case_ : Any = True
            snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : str = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            snake_case_ : List[str] = outputs.attentions
            self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )

            # verify the first attentions (first block, first layer)
            snake_case_ : List[str] = (self.model_tester.image_size // 4) ** 2
            snake_case_ : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,
            )

            # verify the last attentions (last block, last layer)
            snake_case_ : int = (self.model_tester.image_size // 32) ** 2
            snake_case_ : int = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) ,
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,
            )
            snake_case_ : Union[str, Any] = len(lowerCAmelCase__ )

            # Check attention is always last and order is fine
            snake_case_ : Dict = True
            snake_case_ : List[Any] = True
            snake_case_ : int = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )

            self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )

            snake_case_ : Optional[Any] = outputs.attentions

            self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )

            # verify the first attentions (first block, first layer)
            snake_case_ : Dict = (self.model_tester.image_size // 4) ** 2
            snake_case_ : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,
            )

    def _A ( self :Any ) -> Optional[int]:
        '''simple docstring'''

        def check_hidden_states_output(lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ):
            snake_case_ : str = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()

            with torch.no_grad():
                snake_case_ : Any = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )

            snake_case_ : int = outputs.hidden_states

            snake_case_ : Optional[int] = self.model_tester.num_encoder_blocks
            self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) ,
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] ,
            )

        snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            snake_case_ : Any = True
            check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            snake_case_ : Tuple = True

            check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

    def _A ( self :Union[str, Any] ) -> Optional[Any]:
        '''simple docstring'''
        if not self.model_tester.is_training:
            return

        snake_case_, snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Any = True

        for model_class in self.all_model_classes:
            if model_class in get_values(lowerCAmelCase__ ):
                continue

            snake_case_ : Optional[int] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.train()
            snake_case_ : Optional[Any] = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
            snake_case_ : Optional[Any] = model(**lowerCAmelCase__ ).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _A ( self :Optional[int] ) -> Dict:
        '''simple docstring'''
        pass

    @slow
    def _A ( self :List[Any] ) -> str:
        '''simple docstring'''
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : Optional[int] = SegformerModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )


def __UpperCAmelCase ( )-> int:
    """simple docstring"""
    snake_case_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


@require_torch
class A_ (unittest.TestCase ):
    """simple docstring"""

    @slow
    def _A ( self :List[str] ) -> Optional[int]:
        '''simple docstring'''
        snake_case_ : List[str] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ )
        snake_case_ : Tuple = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            lowerCAmelCase__ )

        snake_case_ : int = prepare_img()
        snake_case_ : str = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
        snake_case_ : int = encoded_inputs.pixel_values.to(lowerCAmelCase__ )

        with torch.no_grad():
            snake_case_ : Tuple = model(lowerCAmelCase__ )

        snake_case_ : Tuple = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )

        snake_case_ : List[Any] = torch.tensor(
            [
                [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
                [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
                [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
            ] ).to(lowerCAmelCase__ )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )

    @slow
    def _A ( self :int ) -> Dict:
        '''simple docstring'''
        snake_case_ : Union[str, Any] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ )
        snake_case_ : List[Any] = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(lowerCAmelCase__ )

        snake_case_ : Any = prepare_img()
        snake_case_ : str = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
        snake_case_ : List[Any] = encoded_inputs.pixel_values.to(lowerCAmelCase__ )

        with torch.no_grad():
            snake_case_ : List[str] = model(lowerCAmelCase__ )

        snake_case_ : List[str] = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )

        snake_case_ : List[Any] = torch.tensor(
            [
                [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
                [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
                [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
            ] ).to(lowerCAmelCase__ )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1E-1 ) )

    @slow
    def _A ( self :Any ) -> Optional[int]:
        '''simple docstring'''
        snake_case_ : Optional[int] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ )
        snake_case_ : List[str] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            lowerCAmelCase__ )

        snake_case_ : Tuple = prepare_img()
        snake_case_ : str = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
        snake_case_ : Optional[int] = encoded_inputs.pixel_values.to(lowerCAmelCase__ )

        with torch.no_grad():
            snake_case_ : Union[str, Any] = model(lowerCAmelCase__ )

        snake_case_ : Tuple = outputs.logits.detach().cpu()

        snake_case_ : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ , target_sizes=[(500, 300)] )
        snake_case_ : Optional[Any] = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )

        snake_case_ : Dict = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )
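# Illustrative way to run these tests (path assumed from the HF test layout,
# not stated in the file):
#   python -m pytest tests/models/segformer/test_modeling_segformer.py -q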
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :Any ) -> str: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _A ( self :List[Any] ) -> List[str]: '''simple docstring''' snake_case_ : Any = 1 snake_case_ : Dict = 3 snake_case_ : Union[str, Any] = (32, 32) snake_case_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ ) return image @property def _A ( self :Optional[int] ) -> Any: '''simple docstring''' torch.manual_seed(0 ) snake_case_ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) return model @property def _A ( self :Dict ) -> Any: '''simple docstring''' torch.manual_seed(0 ) snake_case_ : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _A ( self :Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) snake_case_ : str = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , ) return RobertaSeriesModelWithTransformation(lowerCAmelCase__ ) @property def _A ( self :Any ) -> str: '''simple docstring''' def extract(*lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[str] ): class A_ : """simple docstring""" def __init__( self :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : str = torch.ones([0] ) def _A ( self :int , lowerCAmelCase__ :List[Any] ) -> Tuple: '''simple docstring''' self.pixel_values.to(lowerCAmelCase__ ) return self return Out() return extract def _A ( self :int ) -> Dict: '''simple docstring''' snake_case_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator snake_case_ : str = self.dummy_cond_unet snake_case_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) snake_case_ : Dict = self.dummy_vae snake_case_ : Dict = self.dummy_text_encoder snake_case_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) snake_case_ : str = 77 snake_case_ : Any = self.dummy_image.to(lowerCAmelCase__ ) snake_case_ : Tuple = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk snake_case_ : Optional[Any] = AltDiffusionImgaImgPipeline( unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , ) snake_case_ : 
Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ ) snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) snake_case_ : Dict = "A painting of a squirrel eating a burger" snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) snake_case_ : Dict = alt_pipe( [prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ) snake_case_ : Any = output.images snake_case_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 ) snake_case_ : Optional[Any] = alt_pipe( [prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0] snake_case_ : Tuple = image[0, -3:, -3:, -1] snake_case_ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ : int = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _A ( self :int ) -> List[str]: '''simple docstring''' snake_case_ : Union[str, Any] = self.dummy_cond_unet snake_case_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) snake_case_ : int = self.dummy_vae snake_case_ : List[Any] = self.dummy_text_encoder snake_case_ : int = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" ) snake_case_ : int = 77 snake_case_ : Dict = self.dummy_image.to(lowerCAmelCase__ ) # put models in fp16 snake_case_ : Optional[Any] = unet.half() snake_case_ : Tuple = vae.half() snake_case_ : List[str] = bert.half() # make sure here that pndm scheduler skips prk snake_case_ : Optional[int] = AltDiffusionImgaImgPipeline( unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , ) snake_case_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ ) snake_case_ : Optional[Any] = alt_pipe.to(lowerCAmelCase__ ) alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) snake_case_ : List[Any] = "A painting of a squirrel eating a burger" snake_case_ : str = torch.manual_seed(0 ) snake_case_ : Any = alt_pipe( [prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , image=lowerCAmelCase__ , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _A ( self :Optional[int] ) -> Any: '''simple docstring''' snake_case_ : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) # resize to resolution that is divisible by 8 but not 16 or 32 snake_case_ : str = init_image.resize((760, 504) ) snake_case_ : Optional[Any] = "BAAI/AltDiffusion" snake_case_ : int = AltDiffusionImgaImgPipeline.from_pretrained( lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() snake_case_ : Tuple = "A fantasy landscape, trending 
on artstation" snake_case_ : int = torch.manual_seed(0 ) snake_case_ : List[str] = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , ) snake_case_ : str = output.images[0] snake_case_ : List[Any] = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) snake_case_ : Tuple = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _A ( self :str ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) snake_case_ : List[Any] = init_image.resize((768, 512) ) snake_case_ : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" ) snake_case_ : Any = "BAAI/AltDiffusion" snake_case_ : List[str] = AltDiffusionImgaImgPipeline.from_pretrained( lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() snake_case_ : Tuple = "A fantasy landscape, trending on artstation" snake_case_ : Tuple = torch.manual_seed(0 ) snake_case_ : List[Any] = pipe( prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="np" , ) snake_case_ : Optional[int] = output.images[0] assert image.shape == (512, 768, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1E-2
'''simple docstring''' def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int: """simple docstring""" def count_of_possible_combinations(__magic_name__ ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__magic_name__ ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int: """simple docstring""" def count_of_possible_combinations_with_dp_array( __magic_name__ ,__magic_name__ ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] snake_case_ : Any = sum( count_of_possible_combinations_with_dp_array(target - item ,__magic_name__ ) for item in array ) snake_case_ : List[Any] = answer return answer snake_case_ : List[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__magic_name__ ,__magic_name__ ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int: """simple docstring""" snake_case_ : Union[str, Any] = [0] * (target + 1) snake_case_ : str = 1 for i in range(1 ,target + 1 ): for j in range(__magic_name__ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __lowerCamelCase : Tuple = 3 __lowerCamelCase : Optional[int] = 5 __lowerCamelCase : str = [1, 2, 5] print(combination_sum_iv(n, array, target))
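# Standalone worked sketch of the bottom-up recurrence above, with fresh names
# so it can be run on its own. With array = [1, 2, 5] and target = 5 the table
# fills as dp = [1, 1, 2, 3, 5, 9]: dp[0] counts the empty combination and
# dp[i] sums dp[i - item] over every allowed item, so there are 9 ordered ways.
def combination_count(array, target):
    dp = [0] * (target + 1)
    dp[0] = 1
    for i in range(1, target + 1):
        for item in array:
            if i - item >= 0:
                dp[i] += dp[i - item]
    return dp[target]

assert combination_count([1, 2, 5], 5) == 9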
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __lowerCamelCase : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class A_ (unittest.TestCase ): """simple docstring""" a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: a__ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Any: '''simple docstring''' snake_case_ : Optional[Any] = ZeroShotClassificationPipeline( model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , candidate_labels=["polics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _A ( self :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[Any]: '''simple docstring''' snake_case_ : Tuple = classifier("Who are you voting for in 2020?" , candidate_labels="politics" ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) # No kwarg snake_case_ : List[Any] = classifier("Who are you voting for in 2020?" , ["politics"] ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) snake_case_ : Dict = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) snake_case_ : int = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" ) self.assertEqual( lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) snake_case_ : Optional[int] = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] ) self.assertEqual( lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) snake_case_ : str = classifier( "Who are you voting for in 2020?" 
, candidate_labels="politics" , hypothesis_template="This text is about {}" ) self.assertEqual(lowerCAmelCase__ , {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ )]} ) # https://github.com/huggingface/transformers/issues/13846 snake_case_ : Dict = classifier(["I am happy"] , ["positive", "negative"] ) self.assertEqual( lowerCAmelCase__ , [ {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} for i in range(1 ) ] , ) snake_case_ : Tuple = classifier(["I am happy", "I am sad"] , ["positive", "negative"] ) self.assertEqual( lowerCAmelCase__ , [ {"sequence": ANY(lowerCAmelCase__ ), "labels": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )], "scores": [ANY(lowerCAmelCase__ ), ANY(lowerCAmelCase__ )]} for i in range(2 ) ] , ) with self.assertRaises(lowerCAmelCase__ ): classifier("" , candidate_labels="politics" ) with self.assertRaises(lowerCAmelCase__ ): classifier(lowerCAmelCase__ , candidate_labels="politics" ) with self.assertRaises(lowerCAmelCase__ ): classifier("Who are you voting for in 2020?" , candidate_labels="" ) with self.assertRaises(lowerCAmelCase__ ): classifier("Who are you voting for in 2020?" , candidate_labels=lowerCAmelCase__ ) with self.assertRaises(lowerCAmelCase__ ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , ) with self.assertRaises(lowerCAmelCase__ ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=lowerCAmelCase__ , ) self.run_entailment_id(lowerCAmelCase__ ) def _A ( self :List[Any] , lowerCAmelCase__ :Pipeline ) -> Union[str, Any]: '''simple docstring''' snake_case_ : int = zero_shot_classifier.model.config snake_case_ : Optional[int] = config.labelaid snake_case_ : Tuple = zero_shot_classifier.entailment_id snake_case_ : Optional[Any] = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) snake_case_ : Tuple = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) snake_case_ : str = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) snake_case_ : str = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) snake_case_ : List[str] = original_labelaid self.assertEqual(lowerCAmelCase__ , zero_shot_classifier.entailment_id ) @require_torch def _A ( self :Tuple ) -> Any: '''simple docstring''' snake_case_ : List[Any] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] ) @require_torch def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) snake_case_ : int = zero_shot_classifier( "Who are you voting for in 2020?" 
, candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' snake_case_ : List[str] = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , ) snake_case_ : Optional[int] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _A ( self :Union[str, Any] ) -> int: '''simple docstring''' snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" ) snake_case_ : str = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) snake_case_ : Optional[int] = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _A ( self :List[str] ) -> str: '''simple docstring''' snake_case_ : int = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" ) snake_case_ : Optional[Any] = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) snake_case_ : Tuple = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=lowerCAmelCase__ , ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. 
We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
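# Hedged usage sketch (assumes Hub access): the zero-shot pipeline exercised
# above, outside a test. The tiny checkpoint is the one the fast tests load;
# a real NLI model such as "roberta-large-mnli" gives meaningful scores.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-classification",
    model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
)
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
    hypothesis_template="This example is about {}.",
)
# `result` is a dict with "sequence", "labels" (sorted by score) and "scores".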
'''simple docstring''' import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class A_ (unittest.TestCase ): """simple docstring""" def _A ( self :int ) -> Dict: '''simple docstring''' super().tearDown() gc.collect() def _A ( self :Union[str, Any] ) -> List[Any]: '''simple docstring''' snake_case_, snake_case_ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , ) snake_case_ : Tuple = "A painting of a squirrel eating a burger" snake_case_ : Tuple = jax.device_count() snake_case_ : Dict = num_samples * [prompt] snake_case_ : Tuple = sd_pipe.prepare_inputs(lowerCAmelCase__ ) snake_case_ : str = replicate(lowerCAmelCase__ ) snake_case_ : Dict = shard(lowerCAmelCase__ ) snake_case_ : Any = jax.random.PRNGKey(0 ) snake_case_ : List[str] = jax.random.split(lowerCAmelCase__ , jax.device_count() ) snake_case_ : Optional[int] = sd_pipe(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , num_inference_steps=25 , jit=lowerCAmelCase__ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) snake_case_ : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case_ : Optional[Any] = images[0, 253:256, 253:256, -1] snake_case_ : List[str] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ : Dict = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def _A ( self :Optional[Any] ) -> str: '''simple docstring''' snake_case_ : Any = "stabilityai/stable-diffusion-2" snake_case_, snake_case_ : Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase__ , subfolder="scheduler" ) snake_case_, snake_case_ : int = FlaxStableDiffusionPipeline.from_pretrained( lowerCAmelCase__ , scheduler=lowerCAmelCase__ , revision="bf16" , dtype=jnp.bfloataa , ) snake_case_ : int = scheduler_params snake_case_ : Optional[Any] = "A painting of a squirrel eating a burger" snake_case_ : List[Any] = jax.device_count() snake_case_ : List[str] = num_samples * [prompt] snake_case_ : List[Any] = sd_pipe.prepare_inputs(lowerCAmelCase__ ) snake_case_ : List[Any] = replicate(lowerCAmelCase__ ) snake_case_ : List[Any] = shard(lowerCAmelCase__ ) snake_case_ : int = jax.random.PRNGKey(0 ) snake_case_ : List[Any] = jax.random.split(lowerCAmelCase__ , jax.device_count() ) snake_case_ : Optional[int] = sd_pipe(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , num_inference_steps=25 , jit=lowerCAmelCase__ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) snake_case_ : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) snake_case_ : List[str] = images[0, 253:256, 253:256, -1] snake_case_ : str = jnp.asarray(jax.device_get(image_slice.flatten() ) ) snake_case_ : Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
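# Hedged sketch of the data-parallel sampling pattern used above: tokenize one
# prompt per device, shard the ids, replicate the params, and call the
# pipeline with jit=True. Assumes JAX-visible accelerators and Hub access.
import jax
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

def sample_on_all_devices():
    pipe, params = FlaxStableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2")
    prompts = jax.device_count() * ["A painting of a squirrel eating a burger"]
    prompt_ids = shard(pipe.prepare_inputs(prompts))
    params = replicate(params)
    rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
    images = pipe(prompt_ids, params, rng, num_inference_steps=25, jit=True)[0]
    return images  # (num_devices, 1, 768, 768, 3)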
'''simple docstring''' import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''): raise Exception('''requires fairseq >= 1.0.0a''') logging.set_verbosity_info() __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Union[str, Any] = '''Hello world! cécé herlolip''' def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Optional[Any]: """simple docstring""" snake_case_ : str = FairseqRobertaModel.from_pretrained(__magic_name__ ) roberta.eval() # disable dropout snake_case_ : Dict = roberta.model.encoder.sentence_encoder snake_case_ : List[str] = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,) if classification_head: snake_case_ : List[str] = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0] print("Our RoBERTa config:" ,__magic_name__ ) snake_case_ : List[str] = XLMRobertaXLForSequenceClassification(__magic_name__ ) if classification_head else XLMRobertaXLForMaskedLM(__magic_name__ ) model.eval() # Now let's copy all the weights. # Embeddings snake_case_ : List[Any] = roberta_sent_encoder.embed_tokens.weight snake_case_ : int = roberta_sent_encoder.embed_positions.weight snake_case_ : Union[str, Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
snake_case_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight snake_case_ : str = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer snake_case_ : BertLayer = model.roberta.encoder.layer[i] snake_case_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] snake_case_ : RobertaAttention = layer.attention snake_case_ : Dict = roberta_layer.self_attn_layer_norm.weight snake_case_ : Dict = roberta_layer.self_attn_layer_norm.bias # self attention snake_case_ : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) snake_case_ : Dict = roberta_layer.self_attn.q_proj.weight snake_case_ : Any = roberta_layer.self_attn.q_proj.bias snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.weight snake_case_ : Optional[Any] = roberta_layer.self_attn.k_proj.bias snake_case_ : Optional[int] = roberta_layer.self_attn.v_proj.weight snake_case_ : Any = roberta_layer.self_attn.v_proj.bias # self-attention output snake_case_ : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape snake_case_ : List[str] = roberta_layer.self_attn.out_proj.weight snake_case_ : Optional[int] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm snake_case_ : int = roberta_layer.final_layer_norm.weight snake_case_ : Union[str, Any] = roberta_layer.final_layer_norm.bias # intermediate snake_case_ : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape snake_case_ : List[str] = roberta_layer.fca.weight snake_case_ : List[Any] = roberta_layer.fca.bias # output snake_case_ : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape snake_case_ : Any = roberta_layer.fca.weight snake_case_ : Any = roberta_layer.fca.bias # end of layer if classification_head: snake_case_ : int = roberta.model.classification_heads["mnli"].dense.weight snake_case_ : Union[str, Any] = roberta.model.classification_heads["mnli"].dense.bias snake_case_ : Tuple = roberta.model.classification_heads["mnli"].out_proj.weight snake_case_ : str = roberta.model.classification_heads["mnli"].out_proj.bias else: # LM Head snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.dense.weight snake_case_ : int = roberta.model.encoder.lm_head.dense.bias snake_case_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight snake_case_ : Optional[int] = roberta.model.encoder.lm_head.layer_norm.bias snake_case_ : int = roberta.model.encoder.lm_head.weight snake_case_ : List[str] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
snake_case_ : torch.Tensor = roberta.encode(__magic_name__ ).unsqueeze(0 ) # batch of size 1 snake_case_ : Union[str, Any] = model(__magic_name__ )[0] if classification_head: snake_case_ : Optional[Any] = roberta.model.classification_heads["mnli"](roberta.extract_features(__magic_name__ ) ) else: snake_case_ : List[str] = roberta.model(__magic_name__ )[0] print(our_output.shape ,their_output.shape ) snake_case_ : str = torch.max(torch.abs(our_output - their_output ) ).item() print(F'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 snake_case_ : Any = torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 ) print("Do both models output the same tensors?" ,"🔥" if success else "💩" ) if not success: raise Exception("Something went wRoNg" ) pathlib.Path(__magic_name__ ).mkdir(parents=__magic_name__ ,exist_ok=__magic_name__ ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__magic_name__ ) if __name__ == "__main__": __lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) __lowerCamelCase : Tuple = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
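# Hedged usage sketch: a typical command-line invocation of this conversion
# script (the script name and paths are illustrative placeholders).
#
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# Pass --classification_head as well when the fairseq checkpoint carries an
# "mnli" classification head that should be converted instead of the LM head.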
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class A_ : """simple docstring""" def __init__( self :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :List[Any]=99 , lowerCAmelCase__ :List[str]=32 , lowerCAmelCase__ :int=5 , lowerCAmelCase__ :Union[str, Any]=4 , lowerCAmelCase__ :Union[str, Any]=37 , lowerCAmelCase__ :Optional[int]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :List[str]=50 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :List[str]=None , ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[Any] = parent snake_case_ : Any = batch_size snake_case_ : int = seq_length snake_case_ : Dict = is_training snake_case_ : str = use_input_mask snake_case_ : Any = vocab_size snake_case_ : List[str] = hidden_size snake_case_ : Tuple = num_hidden_layers snake_case_ : Optional[int] = num_attention_heads snake_case_ : List[Any] = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Tuple = hidden_dropout_prob snake_case_ : Optional[Any] = attention_probs_dropout_prob snake_case_ : Union[str, Any] = max_position_embeddings snake_case_ : str = initializer_range snake_case_ : Dict = use_labels snake_case_ : List[str] = scope def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : List[Any] = None if self.use_input_mask: snake_case_ : str = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _A ( self :str ) -> List[str]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , ) def _A ( self :Optional[Any] ) -> Union[str, Any]: '''simple docstring''' ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) : List[str] = self.prepare_config_and_inputs() snake_case_ : Any = True snake_case_ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _A ( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ 
:Optional[Any] , lowerCAmelCase__ :Tuple , **lowerCAmelCase__ :Dict , ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[int] = BertGenerationEncoder(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : int = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) snake_case_ : Any = model(lowerCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Optional[Any] , ) -> int: '''simple docstring''' snake_case_ : str = True snake_case_ : Tuple = BertGenerationEncoder(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : List[str] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , ) snake_case_ : List[str] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _A ( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , **lowerCAmelCase__ :int , ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = True snake_case_ : str = True snake_case_ : Union[str, Any] = BertGenerationDecoder(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval() # first forward pass snake_case_ : Dict = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , ) snake_case_ : List[str] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case_ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case_ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case_ : str = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case_ : str = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0] snake_case_ : List[Any] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0] # select random slice snake_case_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case_ : Any = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case_ : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) def _A ( self :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :int , *lowerCAmelCase__ :Optional[Any] , ) -> Optional[int]: '''simple docstring''' snake_case_ : Dict = 
BertGenerationDecoder(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _A ( self :Tuple ) -> List[Any]: '''simple docstring''' snake_case_, snake_case_, snake_case_, snake_case_ : int = self.prepare_config_and_inputs() snake_case_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A_ (a_ , a_ , a_ , unittest.TestCase ): """simple docstring""" a__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () a__ = (BertGenerationDecoder,) if is_torch_available() else () a__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def _A ( self :int ) -> str: '''simple docstring''' snake_case_ : List[str] = BertGenerationEncoderTester(self ) snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 ) def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def _A ( self :int ) -> List[Any]: '''simple docstring''' snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_, snake_case_, snake_case_, snake_case_ : str = self.model_tester.prepare_config_and_inputs() snake_case_ : int = "bert" self.model_tester.create_and_check_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def _A ( self :List[str] ) -> Any: '''simple docstring''' snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCAmelCase__ ) def _A ( self :List[Any] ) -> Dict: '''simple docstring''' snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCAmelCase__ ) def _A ( self :Optional[Any] ) -> Any: '''simple docstring''' ( ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ( snake_case_ ), ) : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) def _A ( self :Optional[Any] ) -> Dict: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase__ ) @slow def _A ( self :Optional[Any] ) -> str: '''simple docstring''' snake_case_ : int = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(lowerCAmelCase__ ) @require_torch class A_ (unittest.TestCase ): """simple docstring""" @slow def _A ( self :int ) -> int: '''simple docstring''' snake_case_ : Optional[int] = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) snake_case_ : str = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] ) with torch.no_grad(): snake_case_ : Optional[Any] = model(lowerCAmelCase__ )[0] snake_case_ : Optional[int] = torch.Size([1, 8, 1_024] ) 
self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : str = torch.tensor( [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) ) @require_torch class A_ (unittest.TestCase ): """simple docstring""" @slow def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' snake_case_ : str = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) snake_case_ : Optional[Any] = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] ) with torch.no_grad(): snake_case_ : List[Any] = model(lowerCAmelCase__ )[0] snake_case_ : Optional[Any] = torch.Size([1, 8, 50_358] ) self.assertEqual(output.shape , lowerCAmelCase__ ) snake_case_ : Optional[Any] = torch.tensor( [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
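# Hedged usage sketch mirroring the integration test above: run the pretrained
# encoder on the hard-coded token ids and inspect the hidden states. Assumes
# Hub access; the checkpoint and ids are the ones the test uses.
import torch
from transformers import BertGenerationEncoder

encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
input_ids = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]])
with torch.no_grad():
    hidden_states = encoder(input_ids)[0]
print(hidden_states.shape)  # torch.Size([1, 8, 1024])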
'''simple docstring''' import os import sys import tempfile import torch from .state import AcceleratorState from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=None ,__magic_name__="no" ,__magic_name__="29500" )-> Optional[int]: """simple docstring""" snake_case_ : str = False snake_case_ : int = False if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ): snake_case_ : Any = True elif "IPython" in sys.modules: snake_case_ : Union[str, Any] = "google.colab" in str(sys.modules["IPython"].get_ipython() ) try: snake_case_ : Any = PrecisionType(mixed_precision.lower() ) except ValueError: raise ValueError( F'''Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.''' ) if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" ,__magic_name__ ) is not None): # TPU launch import torch_xla.distributed.xla_multiprocessing as xmp if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside " "your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if num_processes is None: snake_case_ : Tuple = 8 snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="TPU" ) print(F'''Launching a training on {num_processes} TPU cores.''' ) xmp.spawn(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" ) elif in_colab: # No need for a distributed launch otherwise as it's either CPU or one GPU. if torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on one CPU." ) function(*__magic_name__ ) else: if num_processes is None: raise ValueError( "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." ) if num_processes > 1: # Multi-GPU launch from torch.multiprocessing import start_processes from torch.multiprocessing.spawn import ProcessRaisedException if len(AcceleratorState._shared_state ) > 0: raise ValueError( "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized " "inside your training function. Restart your notebook and make sure no cells initializes an " "`Accelerator`." ) if torch.cuda.is_initialized(): raise ValueError( "To launch a multi-GPU training from your notebook, you need to avoid running any instruction " "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA " "function." ) # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__magic_name__ ,master_addr="127.0.01" ,master_port=__magic_name__ ,mixed_precision=__magic_name__ ): snake_case_ : Optional[int] = PrepareForLaunch(__magic_name__ ,distributed_type="MULTI_GPU" ) print(F'''Launching training on {num_processes} GPUs.''' ) try: start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" ) except ProcessRaisedException as e: if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]: raise RuntimeError( "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. " "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. 
" "Please review your imports and test them when running the `notebook_launcher()` to identify " "which one is problematic." ) from e else: # No need for a distributed launch otherwise as it's either CPU, GPU or MPS. if is_mps_available(): snake_case_ : Any = "1" print("Launching training on MPS." ) elif torch.cuda.is_available(): print("Launching training on one GPU." ) else: print("Launching training on CPU." ) function(*__magic_name__ ) def __UpperCAmelCase ( __magic_name__ ,__magic_name__=() ,__magic_name__=2 )-> Dict: """simple docstring""" from torch.multiprocessing import start_processes with tempfile.NamedTemporaryFile() as tmp_file: # torch.distributed will expect a few environment variable to be here. We set the ones common to each # process here (the other ones will be set be the launcher). with patch_environment( world_size=__magic_name__ ,master_addr="127.0.01" ,master_port="29500" ,accelerate_mixed_precision="no" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="yes" ,): snake_case_ : Any = PrepareForLaunch(__magic_name__ ,debug=__magic_name__ ) start_processes(__magic_name__ ,args=__magic_name__ ,nprocs=__magic_name__ ,start_method="fork" )
'''simple docstring''' from __future__ import annotations from scipy.special import comb # type: ignore class A_ : """simple docstring""" def __init__( self :str , lowerCAmelCase__ :list[tuple[float, float]] ) -> str: '''simple docstring''' snake_case_ : Any = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. snake_case_ : Optional[Any] = len(lowerCAmelCase__ ) - 1 def _A ( self :int , lowerCAmelCase__ :float ) -> list[float]: '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." snake_case_ : list[float] = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , lowerCAmelCase__ ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(lowerCAmelCase__ ) , 5 ) == 1 return output_values def _A ( self :List[Any] , lowerCAmelCase__ :float ) -> tuple[float, float]: '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." snake_case_ : Optional[int] = self.basis_function(lowerCAmelCase__ ) snake_case_ : Any = 0.0 snake_case_ : str = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def _A ( self :Tuple , lowerCAmelCase__ :float = 0.0_1 ) -> Union[str, Any]: '''simple docstring''' from matplotlib import pyplot as plt # type: ignore snake_case_ : list[float] = [] # x coordinates of points to plot snake_case_ : list[float] = [] # y coordinates of points to plot snake_case_ : Optional[int] = 0.0 while t <= 1: snake_case_ : Optional[int] = self.bezier_curve_function(lowerCAmelCase__ ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size snake_case_ : str = [i[0] for i in self.list_of_points] snake_case_ : int = [i[1] for i in self.list_of_points] plt.plot( lowerCAmelCase__ , lowerCAmelCase__ , color="blue" , label="Curve of Degree " + str(self.degree ) , ) plt.scatter(lowerCAmelCase__ , lowerCAmelCase__ , color="red" , label="Control Points" ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
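# Standalone numeric sketch of the Bernstein basis computed above, reusing the
# `comb` import at the top of this file. For the degree-2 curve through
# (0, 0), (5, 5), (5, 0) at t = 0.5 the basis is (0.25, 0.5, 0.25), so
#   x = 0.25 * 0 + 0.5 * 5 + 0.25 * 5 = 3.75
#   y = 0.25 * 0 + 0.5 * 5 + 0.25 * 0 = 2.5
def bezier_point(points, t):
    degree = len(points) - 1
    basis = [comb(degree, i) * (1 - t) ** (degree - i) * t**i for i in range(degree + 1)]
    x = sum(b * p[0] for b, p in zip(basis, points))
    y = sum(b * p[1] for b, p in zip(basis, points))
    return (x, y)

assert bezier_point([(0, 0), (5, 5), (5, 0)], 0.5) == (3.75, 2.5)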
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if u does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if se have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser

PATTERN = re.compile(r"\s+")


def get_hash(example):
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)

print(f"Time to save dataset: {time.time()-t_start:.2f}")
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609_344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277_777_778,
    "mph": 0.621_371_192,
    "knot": 0.539_956_803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
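# Usage sketch (added for illustration; the input values below are assumptions,
# not from the original file). Converting 100 km/h to m/s multiplies by the
# "km/h" factor (1.0) and the inverse "m/s" factor (0.277777778).
if __name__ == "__main__":
    print(convert_speed(100, "km/h", "m/s"))  # 27.778
    print(convert_speed(60, "mph", "km/h"))  # 96.561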
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
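# Usage sketch (added for illustration; the operands are assumptions, not from
# the original file). Each function returns the shifted value as a binary string;
# the arithmetic shift replicates the sign bit on the left.
if __name__ == "__main__":
    print(logical_left_shift(0b1101, 2))  # 0b110100
    print(logical_right_shift(0b1101, 1))  # 0b110
    print(arithmetic_right_shift(-17, 2))  # 0b111011 (two's complement of -5)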
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       normalized=True,
        ...                       case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                "what about this sentence?",
        ...                "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...               ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                       references=references,
        ...                       ignore_punct=True,
        ...                       case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
'''simple docstring''' __lowerCamelCase : dict[str, float] = { "joule": 1.0, "kilojoule": 1000, "megajoule": 1000000, "gigajoule": 1000000000, "wattsecond": 1.0, "watthour": 3600, "kilowatthour": 3600000, "newtonmeter": 1.0, "calorie_nutr": 4186.8, "kilocalorie_nutr": 4186800.00, "electronvolt": 1.6_0217_6634E-19, "britishthermalunit_it": 1055.05585, "footpound": 1.355_818, } def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> float: """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: snake_case_ : int = ( F'''Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n''' F'''Valid values are: {', '.join(__magic_name__ )}''' ) raise ValueError(__magic_name__ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch

import multiprocess
import numpy as np
import pytest

from datasets.utils.py_utils import (
    NestedDataStructure,
    asdict,
    iflatmap_unordered,
    map_nested,
    temp_seed,
    temporary_assignment,
    zip_dict,
)

from .utils import require_tf, require_torch


def __UpperCAmelCase(__magic_name__):  # picklable for multiprocessing
    return x.sum()


def __UpperCAmelCase(__magic_name__):  # picklable for multiprocessing
    return i + 1


@dataclass
class A_:
    a__ = 42
    a__ = 42


class A_(a_):
    def _A(self):
        snake_case_ = {}
        snake_case_ = []
        snake_case_ = 1
        snake_case_ = [1, 2]
        snake_case_ = {"a": 1, "b": 2}
        snake_case_ = {"a": [1, 2], "b": [3, 4]}
        snake_case_ = {"a": {"1": 1}, "b": 2}
        snake_case_ = {"a": 1, "b": 2, "c": 3, "d": 4}
        snake_case_ = {}
        snake_case_ = []
        snake_case_ = 2
        snake_case_ = [2, 3]
        snake_case_ = {"a": 2, "b": 3}
        snake_case_ = {"a": [2, 3], "b": [4, 5]}
        snake_case_ = {"a": {"1": 2}, "b": 3}
        snake_case_ = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__), lowerCAmelCase__)

        snake_case_ = 2
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__, num_proc=lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__, num_proc=lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__, num_proc=lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__, num_proc=lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__, num_proc=lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__, num_proc=lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__, num_proc=lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__, num_proc=lowerCAmelCase__), lowerCAmelCase__)

        snake_case_ = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        snake_case_ = {"a": 2, "b": 0, "c": 2}
        snake_case_ = {
            "a": np.eye(2).astype(lowerCAmelCase__),
            "b": np.zeros(3).astype(lowerCAmelCase__),
            "c": np.ones(2).astype(lowerCAmelCase__),
        }
        self.assertEqual(map_nested(lowerCAmelCase__, lowerCAmelCase__, map_numpy=lowerCAmelCase__), lowerCAmelCase__)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(lowerCAmelCase__, lowerCAmelCase__, map_numpy=lowerCAmelCase__).items()},
            {k: v.tolist() for k, v in expected_map_nested_sna_int.items()},
        )
        self.assertEqual(
            map_nested(lowerCAmelCase__, lowerCAmelCase__, map_numpy=lowerCAmelCase__, num_proc=lowerCAmelCase__), lowerCAmelCase__
        )
        self.assertEqual(
            {
                k: v.tolist()
                for k, v in map_nested(
                    lowerCAmelCase__, lowerCAmelCase__, map_numpy=lowerCAmelCase__, num_proc=lowerCAmelCase__
                ).items()
            },
            {k: v.tolist() for k, v in expected_map_nested_sna_int.items()},
        )
        with self.assertRaises(lowerCAmelCase__):  # can't pickle a local lambda
            map_nested(lambda lowerCAmelCase__: x + 1, lowerCAmelCase__, num_proc=lowerCAmelCase__)

    def _A(self):
        snake_case_ = {"a": 1, "b": 2}
        snake_case_ = {"a": 3, "b": 4}
        snake_case_ = {"a": 5, "b": 6}
        snake_case_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)), lowerCAmelCase__)

    def _A(self):
        class A_:
            a__ = "bar"

        snake_case_ = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(lowerCAmelCase__, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")


@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def __UpperCAmelCase(__magic_name__, __magic_name__, __magic_name__):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        snake_case_ = {f"{i}": i for i in range(__magic_name__)}
        snake_case_ = map_nested(lambda __magic_name__: x + 10, __magic_name__, num_proc=__magic_name__, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc


class A_(a_):
    @require_tf
    def _A(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        snake_case_ = layers.Dense(2)

        def gen_random_output():
            snake_case_ = tf.random.uniform((1, 3))
            return model(lowerCAmelCase__).numpy()

        with temp_seed(42, set_tensorflow=lowerCAmelCase__):
            snake_case_ = gen_random_output()
        with temp_seed(42, set_tensorflow=lowerCAmelCase__):
            snake_case_ = gen_random_output()
        snake_case_ = gen_random_output()

        np.testing.assert_equal(lowerCAmelCase__, lowerCAmelCase__)
        self.assertGreater(np.abs(outa - outa).sum(), 0)

    @require_torch
    def _A(self):
        import torch

        def gen_random_output():
            snake_case_ = torch.nn.Linear(3, 2)
            snake_case_ = torch.rand(1, 3)
            return model(lowerCAmelCase__).detach().numpy()

        with temp_seed(42, set_pytorch=lowerCAmelCase__):
            snake_case_ = gen_random_output()
        with temp_seed(42, set_pytorch=lowerCAmelCase__):
            snake_case_ = gen_random_output()
        snake_case_ = gen_random_output()

        np.testing.assert_equal(lowerCAmelCase__, lowerCAmelCase__)
        self.assertGreater(np.abs(outa - outa).sum(), 0)

    def _A(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            snake_case_ = gen_random_output()
        with temp_seed(42):
            snake_case_ = gen_random_output()
        snake_case_ = gen_random_output()

        np.testing.assert_equal(lowerCAmelCase__, lowerCAmelCase__)
        self.assertGreater(np.abs(outa - outa).sum(), 0)


@pytest.mark.parametrize("input_data", [{}])
def __UpperCAmelCase(__magic_name__):
    snake_case_ = NestedDataStructure(__magic_name__).data
    assert output_data == input_data


@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def __UpperCAmelCase(__magic_name__, __magic_name__):
    snake_case_ = NestedDataStructure(__magic_name__).flatten()
    assert output == expected_output


def __UpperCAmelCase():
    snake_case_ = A(x=1, y="foobar")
    snake_case_ = {"x": 1, "y": "foobar"}
    assert asdict(__magic_name__) == expected_output

    snake_case_ = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    snake_case_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(__magic_name__) == expected_output

    with pytest.raises(__magic_name__):
        asdict([1, A(x=10, y="foo")])


def __UpperCAmelCase(__magic_name__):
    return text.split()


def __UpperCAmelCase(__magic_name__):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def __UpperCAmelCase():
    with Pool(2) as pool:
        snake_case_ = list(iflatmap_unordered(__magic_name__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(__magic_name__) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        snake_case_ = list(iflatmap_unordered(__magic_name__, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(__magic_name__) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        snake_case_ = []
        for yield_time, content in iflatmap_unordered(
            __magic_name__, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(__magic_name__)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(__magic_name__) == 4
def hexagonal_numbers(length: int) -> list[int]:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
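# Usage sketch (added for illustration; the test strings are assumptions, not from
# the original file). `abbr` checks whether `a` can be turned into `b` by
# upper-casing some of its lowercase letters and deleting the remaining ones.
if __name__ == "__main__":
    print(abbr("daBcd", "ABC"))  # True
    print(abbr("dBcd", "ABC"))  # False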
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other
correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations
imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x
increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman
correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but
are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
    title={CRC standard probability and statistics tables and formulae},
    author={Kokoska, Stephen and Zwillinger, Daniel},
    year={2000},
    publisher={Crc Press}
}
@article{2020SciPy-NMeth,
    author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
               Haberland, Matt and Reddy, Tyler and Cournapeau, David and
               Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
               Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
               Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
               Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
               Kern, Robert and Larson, Eric and Carey, C J and
               Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
               {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
               Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
               Harris, Charles R. and Archibald, Anne M. and
               Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
               {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
    title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
               Computing in Python}},
    journal = {Nature Methods},
    year    = {2020},
    volume  = {17},
    pages   = {261--272},
    adsurl  = {https://rdcu.be/b08Wh},
    doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class A_ (a_ , a_ , a_ ): """simple docstring""" a__ = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :int = 50_257 , lowerCAmelCase__ :int = 1_024 , lowerCAmelCase__ :int = 768 , lowerCAmelCase__ :int = 12 , lowerCAmelCase__ :int = 12 , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :str = "gelu_new" , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :float = 0.1 , lowerCAmelCase__ :float = 1E-5 , lowerCAmelCase__ :float = 0.0_2 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , ) -> List[Any]: '''simple docstring''' super().__init__() snake_case_ : str = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) snake_case_ : Any = prefix_inner_dim snake_case_ : List[Any] = prefix_hidden_dim snake_case_ : str = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) snake_case_ : Optional[int] = ( nn.Linear(self.prefix_hidden_dim , lowerCAmelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity() ) snake_case_ : int = GPTaConfig( vocab_size=lowerCAmelCase__ , n_positions=lowerCAmelCase__ , n_embd=lowerCAmelCase__ , n_layer=lowerCAmelCase__ , n_head=lowerCAmelCase__ , n_inner=lowerCAmelCase__ , activation_function=lowerCAmelCase__ , resid_pdrop=lowerCAmelCase__ , embd_pdrop=lowerCAmelCase__ , attn_pdrop=lowerCAmelCase__ , layer_norm_epsilon=lowerCAmelCase__ , initializer_range=lowerCAmelCase__ , scale_attn_weights=lowerCAmelCase__ , use_cache=lowerCAmelCase__ , scale_attn_by_inverse_layer_idx=lowerCAmelCase__ , reorder_and_upcast_attn=lowerCAmelCase__ , ) snake_case_ : Dict = GPTaLMHeadModel(lowerCAmelCase__ ) def _A ( self :str , lowerCAmelCase__ :torch.Tensor , lowerCAmelCase__ :torch.Tensor , lowerCAmelCase__ :Optional[torch.Tensor] = None , lowerCAmelCase__ :Optional[torch.Tensor] = None , ) -> int: '''simple docstring''' snake_case_ : Dict = self.transformer.transformer.wte(lowerCAmelCase__ ) snake_case_ : List[Any] = self.encode_prefix(lowerCAmelCase__ ) snake_case_ : List[Any] = self.decode_prefix(lowerCAmelCase__ ) snake_case_ : List[str] = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: snake_case_ : int = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) snake_case_ : Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) snake_case_ : List[str] = self.transformer(inputs_embeds=lowerCAmelCase__ , labels=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def _A ( self :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.device ) -> torch.Tensor: '''simple docstring''' return torch.zeros(lowerCAmelCase__ , self.prefix_length , dtype=torch.intaa , device=lowerCAmelCase__ ) def _A ( self :List[Any] , 
lowerCAmelCase__ :Union[str, Any] ) -> Dict: '''simple docstring''' return self.encode_prefix(lowerCAmelCase__ ) @torch.no_grad() def _A ( self :Union[str, Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple ) -> str: '''simple docstring''' snake_case_ : List[str] = torch.split(lowerCAmelCase__ , 1 , dim=0 ) snake_case_ : Any = [] snake_case_ : int = [] for feature in features: snake_case_ : Dict = self.decode_prefix(feature.to(lowerCAmelCase__ ) ) # back to the clip feature # Only support beam search for now snake_case_, snake_case_ : int = self.generate_beam( input_embeds=lowerCAmelCase__ , device=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) snake_case_ : List[Any] = torch.stack(lowerCAmelCase__ ) snake_case_ : str = torch.stack(lowerCAmelCase__ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :int=None , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :int = 5 , lowerCAmelCase__ :int = 67 , lowerCAmelCase__ :float = 1.0 , lowerCAmelCase__ :Optional[int] = None , ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = eos_token_id snake_case_ : str = None snake_case_ : int = None snake_case_ : Tuple = torch.ones(lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=torch.int ) snake_case_ : Dict = torch.zeros(lowerCAmelCase__ , device=lowerCAmelCase__ , dtype=torch.bool ) if input_embeds is not None: snake_case_ : List[str] = input_embeds else: snake_case_ : List[str] = self.transformer.transformer.wte(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ ): snake_case_ : Any = self.transformer(inputs_embeds=lowerCAmelCase__ ) snake_case_ : Optional[Any] = outputs.logits snake_case_ : str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) snake_case_ : List[str] = logits.softmax(-1 ).log() if scores is None: snake_case_, snake_case_ : Optional[Any] = logits.topk(lowerCAmelCase__ , -1 ) snake_case_ : List[str] = generated.expand(lowerCAmelCase__ , *generated.shape[1:] ) snake_case_, snake_case_ : int = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: snake_case_ : Union[str, Any] = next_tokens else: snake_case_ : Optional[Any] = tokens.expand(lowerCAmelCase__ , *tokens.shape[1:] ) snake_case_ : Any = torch.cat((tokens, next_tokens) , dim=1 ) else: snake_case_ : Tuple = -float(np.inf ) snake_case_ : List[Any] = 0 snake_case_ : Dict = scores[:, None] + logits seq_lengths[~is_stopped] += 1 snake_case_ : Any = scores_sum / seq_lengths[:, None] snake_case_, snake_case_ : Dict = scores_sum_average.view(-1 ).topk(lowerCAmelCase__ , -1 ) snake_case_ : Optional[Any] = next_tokens // scores_sum.shape[1] snake_case_ : Optional[int] = seq_lengths[next_tokens_source] snake_case_ : Optional[int] = next_tokens % scores_sum.shape[1] snake_case_ : List[str] = next_tokens.unsqueeze(1 ) snake_case_ : Dict = tokens[next_tokens_source] snake_case_ : Tuple = torch.cat((tokens, next_tokens) , dim=1 ) snake_case_ : Optional[Any] = generated[next_tokens_source] snake_case_ : List[str] = scores_sum_average * seq_lengths snake_case_ : Optional[int] = is_stopped[next_tokens_source] snake_case_ : List[str] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) snake_case_ : int = torch.cat((generated, next_token_embed) , dim=1 ) snake_case_ : Any = is_stopped + next_tokens.eq(lowerCAmelCase__ ).squeeze() if 
is_stopped.all(): break snake_case_ : List[str] = scores / seq_lengths snake_case_ : Dict = scores.argsort(descending=lowerCAmelCase__ ) # tokens tensors are already padded to max_seq_length snake_case_ : int = [tokens[i] for i in order] snake_case_ : str = torch.stack(lowerCAmelCase__ , dim=0 ) snake_case_ : str = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __lowerCamelCase : str = 128022 __lowerCamelCase : List[Any] = 128028 @require_sentencepiece class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MaMaaaTokenizer a__ = False a__ = False a__ = True def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' super().setUp() snake_case_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] snake_case_ : Any = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Optional[int] = Path(self.tmpdirname ) save_json(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) snake_case_ : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _A ( self :List[Any] , **lowerCAmelCase__ :List[Any] ) -> str: '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Optional[int] , lowerCAmelCase__ :Any ) -> Optional[int]: '''simple docstring''' return ( "This is a test", "This is a test", ) def _A ( self :List[str] ) -> Union[str, Any]: '''simple docstring''' snake_case_ : str = "</s>" snake_case_ : Union[str, Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> List[str]: '''simple docstring''' snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : Any = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(lowerCAmelCase__ ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' pass def _A ( self :Optional[int] ) -> int: '''simple docstring''' snake_case_ : int = self.get_tokenizer() snake_case_ : List[str] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [2, 3, 4, 5, 6] , ) snake_case_ : Any = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) snake_case_ : Any = tokenizer.convert_tokens_to_string(lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , "This is a test" ) @slow def _A ( self :Any ) -> List[Any]: '''simple docstring''' snake_case_ : int = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch 
@require_sentencepiece @require_tokenizers class A_ (unittest.TestCase ): """simple docstring""" a__ = '''facebook/m2m100_418M''' a__ = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] a__ = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off a__ = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def _A ( cls :str ) -> int: '''simple docstring''' snake_case_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) snake_case_ : List[str] = 1 return cls def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 128_006 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 128_022 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 128_076 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 128_063 ) def _A ( self :Optional[int] ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.tokenizer.get_vocab() self.assertEqual(len(lowerCAmelCase__ ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , lowerCAmelCase__ ) def _A ( self :Any ) -> Dict: '''simple docstring''' snake_case_ : List[str] = "en" snake_case_ : Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ ) def _A ( self :Union[str, Any] ) -> Dict: '''simple docstring''' self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off snake_case_ : Dict = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2] # fmt: on snake_case_ : List[str] = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) snake_case_ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ ) def _A ( self :Tuple ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = tempfile.mkdtemp() snake_case_ : int = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(lowerCAmelCase__ ) snake_case_ : List[str] = MaMaaaTokenizer.from_pretrained(lowerCAmelCase__ ) self.assertDictEqual(new_tok.lang_token_to_id , lowerCAmelCase__ ) @require_torch def _A ( self :Optional[Any] ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = "en" snake_case_ : Tuple = "fr" snake_case_ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors="pt" ) snake_case_ : Dict = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: snake_case_ : str = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _A ( self :Optional[Any] ) -> Tuple: '''simple docstring''' snake_case_ : 
List[str] = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) snake_case_ : int = "zh" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def _A ( self :str ) -> int: '''simple docstring''' snake_case_ : Dict = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) snake_case_ : Tuple = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _A ( self :Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Optional[int] = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , { # en_XX, A, test, EOS "input_ids": [[128_022, 58, 4_183, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 128_006, } , )
'''simple docstring''' import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : Optional[int] = [ '''word_embeddings_layernorm.weight''', '''word_embeddings_layernorm.bias''', '''input_layernorm.weight''', '''input_layernorm.bias''', '''post_attention_layernorm.weight''', '''post_attention_layernorm.bias''', '''self_attention.dense.bias''', '''mlp.dense_4h_to_h.bias''', '''ln_f.weight''', '''ln_f.bias''', ] __lowerCamelCase : Dict = [ '''mlp.dense_4h_to_h.weight''', '''self_attention.dense.weight''', ] def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[int]: """simple docstring""" snake_case_ : Optional[int] = { "word_embeddings.weight": "word_embeddings.weight", "word_embeddings.norm.weight": "word_embeddings_layernorm.weight", "word_embeddings.norm.bias": "word_embeddings_layernorm.bias", "weight": "ln_f.weight", "bias": "ln_f.bias", } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks snake_case_ : str = int(re.match(r".*layer_(\d*).*" ,__magic_name__ )[1] ) layer_number -= 3 return F'''h.{layer_number}.''' + key def __UpperCAmelCase ( __magic_name__ )-> Dict: """simple docstring""" if dtype == torch.bool: return 1 / 8 snake_case_ : str = re.search(r"[^\d](\d+)$" ,str(__magic_name__ ) ) if bit_search is None: raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' ) snake_case_ : Tuple = int(bit_search.groups()[0] ) return bit_size // 8 def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )-> Tuple: """simple docstring""" if bloom_config_file == "": snake_case_ : str = BloomConfig() else: snake_case_ : Tuple = BloomConfig.from_json_file(__magic_name__ ) if shard_model: snake_case_ : str = os.listdir(__magic_name__ ) snake_case_ : List[str] = sorted(filter(lambda __magic_name__ : s.startswith("layer" ) and "model_00" in s ,__magic_name__ ) ) snake_case_ : List[Any] = {"weight_map": {}, "metadata": {}} snake_case_ : Union[str, Any] = 0 snake_case_ : Tuple = None snake_case_ : Optional[Any] = BloomConfig() for j, file in enumerate(__magic_name__ ): print("Processing file: {}".format(__magic_name__ ) ) snake_case_ : List[Any] = None for i in range(__magic_name__ ): # load all TP files snake_case_ : List[Any] = file.replace("model_00" ,F'''model_0{i}''' ) snake_case_ : Optional[Any] = torch.load(os.path.join(__magic_name__ ,__magic_name__ ) ,map_location="cpu" ) # Rename keys in the transformers names snake_case_ : Union[str, Any] = list(temp.keys() ) for key in keys: snake_case_ : List[str] = temp.pop(__magic_name__ ) if tensors is None: snake_case_ : Union[str, Any] = temp else: for key in tensors.keys(): if any(key.endswith(__magic_name__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel snake_case_ : Optional[Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks snake_case_ : List[Any] = torch.cat([tensors[key], temp[key]] ,dim=__magic_name__ ) # Divide by the number of TP the weights we want to average for key in 
tensors.keys(): if any(key.endswith(__magic_name__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): snake_case_ : int = tensors[key] / pretraining_tp torch.save( __magic_name__ ,os.path.join( __magic_name__ ,"pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) ,str(len(__magic_name__ ) ).zfill(5 ) ) ,) ,) for key in tensors.keys(): snake_case_ : Any = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: snake_case_ : int = "pytorch_model_{}-of-{}.bin".format( str(j + 1 ).zfill(5 ) ,str(len(__magic_name__ ) ).zfill(5 ) ) snake_case_ : int = BloomConfig() snake_case_ : Dict = pytorch_dump_folder_path + "/" + CONFIG_NAME snake_case_ : Optional[Any] = total_size with open(__magic_name__ ,"w" ,encoding="utf-8" ) as f: f.write(config.to_json_string() ) with open(os.path.join(__magic_name__ ,WEIGHTS_NAME + ".index.json" ) ,"w" ,encoding="utf-8" ) as f: snake_case_ : Any = json.dumps(__magic_name__ ,indent=2 ,sort_keys=__magic_name__ ) + "\n" f.write(__magic_name__ ) else: snake_case_ : Optional[Any] = BloomModel(__magic_name__ ) snake_case_ : str = os.listdir(__magic_name__ ) snake_case_ : Optional[int] = sorted(filter(lambda __magic_name__ : s.startswith("layer" ) and "model_00" in s ,__magic_name__ ) ) snake_case_ : List[Any] = None for i, file in enumerate(__magic_name__ ): snake_case_ : Tuple = None for i in range(__magic_name__ ): # load all TP files snake_case_ : Tuple = file.replace("model_00" ,F'''model_0{i}''' ) snake_case_ : Dict = torch.load(os.path.join(__magic_name__ ,__magic_name__ ) ,map_location="cpu" ) # Rename keys in the transformers names snake_case_ : Tuple = list(temp.keys() ) for key in keys: snake_case_ : Tuple = temp.pop(__magic_name__ ) if tensors is None: snake_case_ : str = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(__magic_name__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel snake_case_ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks snake_case_ : Union[str, Any] = torch.cat([tensors[key], temp[key]] ,dim=__magic_name__ ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(__magic_name__ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): snake_case_ : List[str] = tensors[key] / pretraining_tp snake_case_ : Optional[Any] = model.load_state_dict(__magic_name__ ,strict=__magic_name__ ) assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected''' if missing_keys is None: snake_case_ : Dict = set(other_keys.missing_keys ) else: snake_case_ : Optional[int] = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, F'''The keys {missing_keys} are missing''' # Save pytorch-model os.makedirs(__magic_name__ ,exist_ok=__magic_name__ ) snake_case_ : Dict = pytorch_dump_folder_path + "/" + WEIGHTS_NAME snake_case_ : Optional[Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' ) if config.torch_dtype is not None: snake_case_ : int = model.to(config.torch_dtype ) torch.save(model.state_dict() ,__magic_name__ ) 
print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(__magic_name__ ,"w" ,encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bloom_checkpoint_path''', default=None, type=str, required=True, help='''Path to the Megatron-LM checkpoint path.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--bloom_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--shard_model''', action='''store_true''', help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''', ) parser.add_argument( '''--pretraining_tp''', default=4, type=int, help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''', ) __lowerCamelCase : Union[str, Any] = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
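# Example invocation (flags come from the argparse definition above; the paths
# are placeholders and the script name assumes the file keeps its upstream name):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoint \
#       --pytorch_dump_folder_path /path/to/output \
#       --shard_model \
#       --pretraining_tp 4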
'''simple docstring''' import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __lowerCamelCase : str = '''.''' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) __lowerCamelCase : Tuple = [ '''Assert''', '''AssignVariableOp''', '''EmptyTensorList''', '''MergeV2Checkpoints''', '''ReadVariableOp''', '''ResourceGather''', '''RestoreV2''', '''SaveV2''', '''ShardedFilename''', '''StatefulPartitionedCall''', '''StaticRegexFullMatch''', '''VarHandleOp''', ] def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> List[str]: """simple docstring""" snake_case_ : Tuple = SavedModel() snake_case_ : Dict = [] with open(os.path.join(__magic_name__ ,"utils" ,"tf_ops" ,"onnx.json" ) ) as f: snake_case_ : Dict = json.load(__magic_name__ )["opsets"] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(__magic_name__ )] ) with open(__magic_name__ ,"rb" ) as f: saved_model.ParseFromString(f.read() ) snake_case_ : Tuple = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want snake_case_ : str = sorted(__magic_name__ ) snake_case_ : Optional[int] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__magic_name__ ) if strict and len(__magic_name__ ) > 0: raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(__magic_name__ ) > 0: print(F'''Found the following incompatible ops for the opset {opset}:''' ) print(*__magic_name__ ,sep="\n" ) else: print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": __lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''') parser.add_argument( '''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.''' ) parser.add_argument( '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.''' ) parser.add_argument( '''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)''' ) __lowerCamelCase : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
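# Example invocation (flags come from the argparse definition above; the .pb
# path is a placeholder):
#
#   python check_tf_ops.py --saved_model_path saved_model/saved_model.pb --opset 12 --strict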
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def __UpperCAmelCase ( __magic_name__ )-> List[Any]: """simple docstring""" snake_case_ : Union[str, Any] = filter(lambda __magic_name__ : p.requires_grad ,model.parameters() ) snake_case_ : List[str] = sum([np.prod(p.size() ) for p in model_parameters] ) return params __lowerCamelCase : Optional[Any] = logging.getLogger(__name__) def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Union[str, Any]: """simple docstring""" if metric == "rouge2": snake_case_ : List[str] = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": snake_case_ : str = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": snake_case_ : str = "{val_avg_em:.4f}-{step_count}" elif metric == "loss": snake_case_ : Any = "{val_avg_loss:.4f}-{step_count}" else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' " function." ) snake_case_ : Optional[Any] = ModelCheckpoint( dirpath=__magic_name__ ,filename=__magic_name__ ,monitor=F'''val_{metric}''' ,mode="max" ,save_top_k=1 ,every_n_epochs=1 ,) return checkpoint_callback def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[int]: """simple docstring""" return EarlyStopping( monitor=F'''val_{metric}''' ,mode="min" if "loss" in metric else "max" ,patience=__magic_name__ ,verbose=__magic_name__ ,) class A_ (pl.Callback ): """simple docstring""" def _A ( self :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :Any ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = {F'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowerCAmelCase__ ) @rank_zero_only def _A ( self :int , lowerCAmelCase__ :pl.Trainer , lowerCAmelCase__ :pl.LightningModule , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict=True ) -> None: '''simple docstring''' logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) snake_case_ : List[Any] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results snake_case_ : Tuple = Path(pl_module.hparams.output_dir ) if type_path == "test": snake_case_ : str = od / "test_results.txt" snake_case_ : List[str] = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
snake_case_ : Dict = od / F'''{type_path}_results/{trainer.global_step:05d}.txt''' snake_case_ : Optional[int] = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=lowerCAmelCase__ ) generations_file.parent.mkdir(exist_ok=lowerCAmelCase__ ) with open(lowerCAmelCase__ , "a+" ) as writer: for key in sorted(lowerCAmelCase__ ): if key in ["log", "progress_bar", "preds"]: continue snake_case_ : Dict = metrics[key] if isinstance(lowerCAmelCase__ , torch.Tensor ): snake_case_ : str = val.item() snake_case_ : Optional[int] = F'''{key}: {val:.6f}\n''' writer.write(lowerCAmelCase__ ) if not save_generations: return if "preds" in metrics: snake_case_ : str = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(lowerCAmelCase__ ) @rank_zero_only def _A ( self :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] ) -> Tuple: '''simple docstring''' try: snake_case_ : Dict = pl_module.model.model.num_parameters() except AttributeError: snake_case_ : Dict = pl_module.model.num_parameters() snake_case_ : Optional[Any] = count_trainable_parameters(lowerCAmelCase__ ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} ) @rank_zero_only def _A ( self :Any , lowerCAmelCase__ :pl.Trainer , lowerCAmelCase__ :pl.LightningModule ) -> int: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(lowerCAmelCase__ , lowerCAmelCase__ , "test" ) @rank_zero_only def _A ( self :Dict , lowerCAmelCase__ :pl.Trainer , lowerCAmelCase__ :Any ) -> Tuple: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
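# Wiring sketch (an assumption: the helpers above correspond to
# `get_checkpoint_callback`, `get_early_stopping_callback` and the logging
# callback from the upstream transformers seq2seq examples; the names and
# values below are illustrative, not taken from this file):
# checkpoint_cb = get_checkpoint_callback(output_dir, "rouge2")
# early_stop_cb = get_early_stopping_callback("rouge2", 3)
# trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb])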
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__) __lowerCamelCase : List[str] = ['''names''', '''prefix'''] __lowerCamelCase : int = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] __lowerCamelCase : str = ['''encoding_errors''', '''on_bad_lines'''] __lowerCamelCase : Optional[Any] = ['''date_format'''] @dataclass class A_ (datasets.BuilderConfig ): """simple docstring""" a__ = "," a__ = None a__ = "infer" a__ = None a__ = None a__ = None a__ = None a__ = None a__ = True a__ = None a__ = None a__ = None a__ = None a__ = False a__ = None a__ = None a__ = None a__ = True a__ = True a__ = False a__ = True a__ = None a__ = "." a__ = None a__ = '"' a__ = 0 a__ = None a__ = None a__ = None a__ = None a__ = True a__ = True a__ = 0 a__ = True a__ = False a__ = None a__ = 10000 a__ = None a__ = "strict" a__ = "error" a__ = None def _A ( self :List[str] ) -> Any: '''simple docstring''' if self.delimiter is not None: snake_case_ : Tuple = self.delimiter if self.column_names is not None: snake_case_ : List[Any] = self.column_names @property def _A ( self :Optional[Any] ) -> int: '''simple docstring''' snake_case_ : Optional[int] = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, "on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del 
pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A_ (datasets.ArrowBasedBuilder ): """simple docstring""" a__ = CsvConfig def _A ( self :Optional[Any] ) -> Optional[Any]: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def _A ( self :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]: '''simple docstring''' if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) snake_case_ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(lowerCAmelCase__ , (str, list, tuple) ): snake_case_ : int = data_files if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : List[str] = [files] snake_case_ : Tuple = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] snake_case_ : str = [] for split_name, files in data_files.items(): if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): snake_case_ : str = [files] snake_case_ : Any = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files] splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) ) return splits def _A ( self :List[Any] , lowerCAmelCase__ :pa.Table ) -> pa.Table: '''simple docstring''' if self.config.features is not None: snake_case_ : int = self.config.features.arrow_schema if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ): # cheaper cast snake_case_ : Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example snake_case_ : Dict = table_cast(lowerCAmelCase__ , lowerCAmelCase__ ) return pa_table def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[int]: '''simple docstring''' snake_case_ : Tuple = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str snake_case_ : str = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ): snake_case_ : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(lowerCAmelCase__ ): snake_case_ : Optional[int] = pa.Table.from_pandas(lowerCAmelCase__ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(lowerCAmelCase__ )}: {e}''' ) raise
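# Usage sketch: this ArrowBasedBuilder is what `datasets` dispatches to for CSV
# files through its standard public API (file names below are placeholders):
# from datasets import load_dataset
# ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",")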
'''simple docstring''' import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class A_ (a_ ): """simple docstring""" a__ = '''MCTCTFeatureExtractor''' a__ = '''AutoTokenizer''' def __init__( self :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] ) -> Tuple: '''simple docstring''' super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : str = self.feature_extractor snake_case_ : Tuple = False def __call__( self :List[str] , *lowerCAmelCase__ :Union[str, Any] , **lowerCAmelCase__ :Tuple ) -> List[Any]: '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) snake_case_ : str = kwargs.pop("raw_speech" ) else: snake_case_ : Any = kwargs.pop("audio" , lowerCAmelCase__ ) snake_case_ : Any = kwargs.pop("sampling_rate" , lowerCAmelCase__ ) snake_case_ : List[Any] = kwargs.pop("text" , lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: snake_case_ : List[str] = args[0] snake_case_ : Dict = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: snake_case_ : Optional[Any] = self.feature_extractor(lowerCAmelCase__ , *lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , **lowerCAmelCase__ ) if text is not None: snake_case_ : int = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ ) if text is None: return inputs elif audio is None: return encodings else: snake_case_ : Any = encodings["input_ids"] return inputs def _A ( self :Union[str, Any] , *lowerCAmelCase__ :int , **lowerCAmelCase__ :Optional[int] ) -> Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) def _A ( self :Any , *lowerCAmelCase__ :Tuple , **lowerCAmelCase__ :Dict ) -> List[str]: '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*lowerCAmelCase__ , **lowerCAmelCase__ ) snake_case_ : Tuple = kwargs.pop("input_features" , lowerCAmelCase__ ) snake_case_ : List[str] = kwargs.pop("labels" , lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: snake_case_ : Union[str, Any] = args[0] snake_case_ : List[Any] = args[1:] if input_features is not None: snake_case_ : Optional[int] = self.feature_extractor.pad(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) if labels is not None: snake_case_ : Tuple = self.tokenizer.pad(lowerCAmelCase__ , **lowerCAmelCase__ ) if labels is None: return input_features elif input_features is None: return labels else: snake_case_ : List[str] = labels["input_ids"] return input_features def _A ( self :Tuple , *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :Any ) -> Any: '''simple docstring''' return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) @contextmanager def _A ( self :List[Any] ) -> Union[str, Any]: '''simple docstring''' warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) snake_case_ : Any = True snake_case_ : str = self.tokenizer yield snake_case_ : List[Any] = self.feature_extractor snake_case_ : Union[str, Any] = False
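# Usage sketch (an assumption: this processor corresponds to transformers'
# `MCTCTProcessor`; the checkpoint name and inputs are illustrative):
# from transformers import MCTCTProcessor
# processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
# inputs = processor(audio=raw_speech, sampling_rate=16_000, return_tensors="pt")
# labels = processor(text="a transcription", return_tensors="pt").input_ids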
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class A_ (a_ , unittest.TestCase ): """simple docstring""" a__ = MgpstrTokenizer a__ = False a__ = {} a__ = False def _A ( self :List[str] ) -> List[str]: '''simple docstring''' super().setUp() # fmt: off snake_case_ : Dict = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on snake_case_ : List[str] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) + "\n" ) def _A ( self :Optional[Any] , **lowerCAmelCase__ :Optional[Any] ) -> Dict: '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _A ( self :Dict , lowerCAmelCase__ :Any ) -> str: '''simple docstring''' snake_case_ : Dict = "tester" snake_case_ : Tuple = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters." ) def _A ( self :Dict ) -> str: '''simple docstring''' pass def _A ( self :Tuple ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : Tuple = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token} ) snake_case_ : str = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 1 ) snake_case_ : Tuple = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) self.assertTrue(special_token not in decoded ) def _A ( self :int ) -> List[str]: '''simple docstring''' snake_case_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_, snake_case_ : str = self.get_input_output_texts(lowerCAmelCase__ ) snake_case_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ) snake_case_ : List[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) snake_case_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) snake_case_ : List[str] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertNotEqual(len(lowerCAmelCase__ ) , 0 ) snake_case_ : List[str] = tokenizer.decode(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual(text_a.replace(" " , "" ) , lowerCAmelCase__ ) @unittest.skip("MGP-STR tokenizer only handles one sequence." ) def _A ( self :Union[str, Any] ) -> Any: '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" ) def _A ( self :int ) -> Dict: '''simple docstring''' pass
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    """For every vector in `value_array`, find its nearest neighbour in `dataset`."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # Keep the closer of the current best and the new candidate
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
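# A minimal demonstration of the helpers above (the arrays are illustrative and
# were added for this sketch; they are not part of the original file):
if __name__ == "__main__":
    demo_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    demo_queries = np.array([[0.9, 1.1]])
    # Nearest neighbour of the query and its Euclidean distance:
    print(similarity_search(demo_dataset, demo_queries))  # [[[1.0, 1.0], 0.1414...]]
    # Parallel vectors have maximal cosine similarity:
    print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # ~1.0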
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
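# Example invocation (file names are placeholders, and the module name assumes
# it is saved as rouge_cli.py as in the upstream examples). python-fire maps
# the function signature straight onto the CLI, so keyword arguments become flags:
#
#   python rouge_cli.py predictions.txt targets.txt --save_path metrics.json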