code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
def A(__a: int ): lowerCAmelCase_ = int(__a ) if n_element < 1: lowerCAmelCase_ = ValueError("a should be a positive number" ) raise my_error lowerCAmelCase_ = [1] lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = (0, 0, 0) lowerCAmelCase_ = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": lowerCamelCase__ = input('''Enter the last number (nth term) of the Hamming Number Series: ''') print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''') lowerCamelCase__ = hamming(int(n)) print('''-----------------------------------------------------''') print(F'''The list with nth numbers is: {hamming_numbers}''') print('''-----------------------------------------------------''')
22
from __future__ import annotations def A(__a: dict , __a: str ): lowerCAmelCase_ , lowerCAmelCase_ = set(__a ), [start] while stack: lowerCAmelCase_ = stack.pop() explored.add(__a ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__a ) return explored lowerCamelCase__ = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
22
1
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def A(__a: Dict ): lowerCAmelCase_ , lowerCAmelCase_ = image.size lowerCAmelCase_ , lowerCAmelCase_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCAmelCase_ = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) lowerCAmelCase_ = np.array(__a ).astype(np.floataa ) / 255.0 lowerCAmelCase_ = image[None].transpose(0 , 3 , 1 , 2 ) lowerCAmelCase_ = torch.from_numpy(__a ) return 2.0 * image - 1.0 class __magic_name__ (__lowercase ): def __init__( self , _a , _a , _a , ) -> int: super().__init__() self.register_modules(vqvae=_a , unet=_a , scheduler=_a ) @torch.no_grad() def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(_a , PIL.Image.Image ): lowerCAmelCase_ = 1 elif isinstance(_a , torch.Tensor ): lowerCAmelCase_ = image.shape[0] else: raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}" ) if isinstance(_a , PIL.Image.Image ): lowerCAmelCase_ = preprocess(_a ) lowerCAmelCase_ , lowerCAmelCase_ = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image lowerCAmelCase_ = (batch_size, self.unet.config.in_channels // 2, height, width) lowerCAmelCase_ = next(self.unet.parameters() ).dtype lowerCAmelCase_ = randn_tensor(_a , generator=_a , device=self.device , dtype=_a ) lowerCAmelCase_ = image.to(device=self.device , dtype=_a ) # set timesteps and move to the correct device self.scheduler.set_timesteps(_a , device=self.device ) lowerCAmelCase_ = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler lowerCAmelCase_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCAmelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCAmelCase_ = {} if accepts_eta: lowerCAmelCase_ = eta for t in self.progress_bar(_a ): # concat latents and low resolution image in the channel dimension. lowerCAmelCase_ = torch.cat([latents, image] , dim=1 ) lowerCAmelCase_ = self.scheduler.scale_model_input(_a , _a ) # predict the noise residual lowerCAmelCase_ = self.unet(_a , _a ).sample # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase_ = self.scheduler.step(_a , _a , _a , **_a ).prev_sample # decode the image latents with the VQVAE lowerCAmelCase_ = self.vqvae.decode(_a ).sample lowerCAmelCase_ = torch.clamp(_a , -1.0 , 1.0 ) lowerCAmelCase_ = image / 2 + 0.5 lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase_ = self.numpy_to_pil(_a ) if not return_dict: return (image,) return ImagePipelineOutput(images=_a )
22
def A(__a: Tuple ): lowerCAmelCase_ = len(__a ) while cur > 1: # Find the maximum number in arr lowerCAmelCase_ = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi lowerCAmelCase_ = arr[mi::-1] + arr[mi + 1 : len(__a )] # Reverse whole list lowerCAmelCase_ = arr[cur - 1 :: -1] + arr[cur : len(__a )] cur -= 1 return arr if __name__ == "__main__": lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCamelCase__ = [int(item) for item in user_input.split(''',''')] print(pancake_sort(unsorted))
22
1
lowerCamelCase__ = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' lowerCamelCase__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] lowerCamelCase__ = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
22
import string from math import logaa def A(__a: str , __a: str ): lowerCAmelCase_ = document.translate( str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" ) lowerCAmelCase_ = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def A(__a: str , __a: str ): lowerCAmelCase_ = corpus.lower().translate( str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with '' lowerCAmelCase_ = corpus_without_punctuation.split("\n" ) lowerCAmelCase_ = term.lower() return (len([doc for doc in docs if term in doc] ), len(__a )) def A(__a: int , __a: int , __a: List[Any]=False ): if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ) , 3 ) def A(__a: int , __a: int ): return round(tf * idf , 3 )
22
1
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. lowerCamelCase__ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class __magic_name__ (unittest.TestCase ): lowerCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowerCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowerCamelCase__ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def __a ( self , _a , _a , _a ) -> Optional[int]: lowerCAmelCase_ = ZeroShotClassificationPipeline( model=_a , tokenizer=_a , candidate_labels=["polics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def __a ( self , _a , _a ) -> Any: lowerCAmelCase_ = classifier("Who are you voting for in 2020?" , candidate_labels="politics" ) self.assertEqual(_a , {"sequence": ANY(_a ), "labels": [ANY(_a )], "scores": [ANY(_a )]} ) # No kwarg lowerCAmelCase_ = classifier("Who are you voting for in 2020?" , ["politics"] ) self.assertEqual(_a , {"sequence": ANY(_a ), "labels": [ANY(_a )], "scores": [ANY(_a )]} ) lowerCAmelCase_ = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] ) self.assertEqual(_a , {"sequence": ANY(_a ), "labels": [ANY(_a )], "scores": [ANY(_a )]} ) lowerCAmelCase_ = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" ) self.assertEqual( _a , {"sequence": ANY(_a ), "labels": [ANY(_a ), ANY(_a )], "scores": [ANY(_a ), ANY(_a )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) lowerCAmelCase_ = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] ) self.assertEqual( _a , {"sequence": ANY(_a ), "labels": [ANY(_a ), ANY(_a )], "scores": [ANY(_a ), ANY(_a )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 ) lowerCAmelCase_ = classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" ) self.assertEqual(_a , {"sequence": ANY(_a ), "labels": [ANY(_a )], "scores": [ANY(_a )]} ) # https://github.com/huggingface/transformers/issues/13846 lowerCAmelCase_ = classifier(["I am happy"] , ["positive", "negative"] ) self.assertEqual( _a , [ {"sequence": ANY(_a ), "labels": [ANY(_a ), ANY(_a )], "scores": [ANY(_a ), ANY(_a )]} for i in range(1 ) ] , ) lowerCAmelCase_ = classifier(["I am happy", "I am sad"] , ["positive", "negative"] ) self.assertEqual( _a , [ {"sequence": ANY(_a ), "labels": [ANY(_a ), ANY(_a )], "scores": [ANY(_a ), ANY(_a )]} for i in range(2 ) ] , ) with self.assertRaises(_a ): classifier("" , candidate_labels="politics" ) with self.assertRaises(_a ): classifier(_a , candidate_labels="politics" ) with self.assertRaises(_a ): classifier("Who are you voting for in 2020?" , candidate_labels="" ) with self.assertRaises(_a ): classifier("Who are you voting for in 2020?" , candidate_labels=_a ) with self.assertRaises(_a ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , ) with self.assertRaises(_a ): classifier( "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=_a , ) self.run_entailment_id(_a ) def __a ( self , _a ) -> List[Any]: lowerCAmelCase_ = zero_shot_classifier.model.config lowerCAmelCase_ = config.labelaid lowerCAmelCase_ = zero_shot_classifier.entailment_id lowerCAmelCase_ = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) lowerCAmelCase_ = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) lowerCAmelCase_ = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) lowerCAmelCase_ = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) lowerCAmelCase_ = original_labelaid self.assertEqual(_a , zero_shot_classifier.entailment_id ) @require_torch def __a ( self ) -> Optional[int]: lowerCAmelCase_ = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] ) @require_torch def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , ) lowerCAmelCase_ = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(_a ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def __a ( self ) -> List[str]: lowerCAmelCase_ = pipeline( "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , ) lowerCAmelCase_ = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(_a ) , { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def __a ( self ) -> List[str]: lowerCAmelCase_ = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" ) lowerCAmelCase_ = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(_a ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) lowerCAmelCase_ = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_a , ) self.assertEqual( nested_simplify(_a ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def __a ( self ) -> Dict: lowerCAmelCase_ = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" ) lowerCAmelCase_ = zero_shot_classifier( "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(_a ) , { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) lowerCAmelCase_ = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=_a , ) self.assertEqual( nested_simplify(_a ) , { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
22
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
22
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { '''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig'''] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''RemBertTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''RemBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RemBertForCausalLM''', '''RemBertForMaskedLM''', '''RemBertForMultipleChoice''', '''RemBertForQuestionAnswering''', '''RemBertForSequenceClassification''', '''RemBertForTokenClassification''', '''RemBertLayer''', '''RemBertModel''', '''RemBertPreTrainedModel''', '''load_tf_weights_in_rembert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRemBertForCausalLM''', '''TFRemBertForMaskedLM''', '''TFRemBertForMultipleChoice''', '''TFRemBertForQuestionAnswering''', '''TFRemBertForSequenceClassification''', '''TFRemBertForTokenClassification''', '''TFRemBertLayer''', '''TFRemBertModel''', '''TFRemBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Tuple: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a ) if visual_prompt is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if images is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if visual_prompt is not None and images is not None: lowerCAmelCase_ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase_ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
22
1
import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( "files" , [ ["full:README.md", "dataset_infos.json"], ["empty:README.md", "dataset_infos.json"], ["dataset_infos.json"], ["full:README.md"], ] , ) def A(__a: Any , __a: Optional[int] ): lowerCAmelCase_ = tmp_path_factory.mktemp("dset_infos_dir" ) if "full:README.md" in files: with open(dataset_infos_dir / "README.md" , "w" ) as f: f.write("---\ndataset_info:\n dataset_size: 42\n---" ) if "empty:README.md" in files: with open(dataset_infos_dir / "README.md" , "w" ) as f: f.write("" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f: f.write("{\"default\": {\"dataset_size\": 42}}" ) lowerCAmelCase_ = DatasetInfosDict.from_directory(__a ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( "dataset_info" , [ DatasetInfo(), DatasetInfo( description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ), ] , ) def A(__a: List[Any] , __a: DatasetInfo ): lowerCAmelCase_ = str(__a ) dataset_info.write_to_directory(__a ) lowerCAmelCase_ = DatasetInfo.from_directory(__a ) assert dataset_info == reloaded assert os.path.exists(os.path.join(__a , "dataset_info.json" ) ) def A(): lowerCAmelCase_ = DatasetInfo( description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) lowerCAmelCase_ = dataset_info._to_yaml_dict() assert sorted(__a ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) lowerCAmelCase_ = yaml.safe_dump(__a ) lowerCAmelCase_ = yaml.safe_load(__a ) assert dataset_info_yaml_dict == reloaded def A(): lowerCAmelCase_ = DatasetInfo() lowerCAmelCase_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( "dataset_infos_dict" , [ DatasetInfosDict(), DatasetInfosDict({"default": DatasetInfo()} ), DatasetInfosDict({"my_config_name": DatasetInfo()} ), DatasetInfosDict( { "default": DatasetInfo( description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ) } ), DatasetInfosDict( { "v1": DatasetInfo(dataset_size=42 ), "v2": DatasetInfo(dataset_size=1337 ), } ), ] , ) def A(__a: List[str] , __a: DatasetInfosDict ): lowerCAmelCase_ = str(__a ) dataset_infos_dict.write_to_directory(__a ) lowerCAmelCase_ = DatasetInfosDict.from_directory(__a ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): lowerCAmelCase_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml lowerCAmelCase_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(__a , "README.md" ) )
22
import datasets lowerCamelCase__ = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' lowerCamelCase__ = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' lowerCamelCase__ = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def A(__a: Dict , __a: Union[str, Any] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def __a ( self , _a , _a ) -> List[str]: return {"accuracy": simple_accuracy(_a , _a )}
22
1
from __future__ import annotations def A(__a: list[int | float] , __a: int , __a: int ): if len(__a ) == 0: raise ValueError("find_max() arg is an empty sequence" ) if ( left >= len(__a ) or left < -len(__a ) or right >= len(__a ) or right < -len(__a ) ): raise IndexError("list index out of range" ) if left == right: return nums[left] lowerCAmelCase_ = (left + right) >> 1 # the middle lowerCAmelCase_ = find_max(__a , __a , __a ) # find max in range[left, mid] lowerCAmelCase_ = find_max(__a , mid + 1 , __a ) # find max in range[mid + 1, right] return left_max if left_max >= right_max else right_max if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
22
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
22
1
import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def A(__a: int , __a: Any=1 ): if n_shave_prefix_segments >= 0: return ".".join(path.split("." )[n_shave_prefix_segments:] ) else: return ".".join(path.split("." )[:n_shave_prefix_segments] ) def A(__a: int , __a: Tuple=0 ): lowerCAmelCase_ = [] for old_item in old_list: lowerCAmelCase_ = old_item.replace("in_layers.0" , "norm1" ) lowerCAmelCase_ = new_item.replace("in_layers.2" , "conv1" ) lowerCAmelCase_ = new_item.replace("out_layers.0" , "norm2" ) lowerCAmelCase_ = new_item.replace("out_layers.3" , "conv2" ) lowerCAmelCase_ = new_item.replace("emb_layers.1" , "time_emb_proj" ) lowerCAmelCase_ = new_item.replace("skip_connection" , "conv_shortcut" ) lowerCAmelCase_ = shave_segments(__a , n_shave_prefix_segments=__a ) mapping.append({"old": old_item, "new": new_item} ) return mapping def A(__a: int , __a: Union[str, Any]=0 ): lowerCAmelCase_ = [] for old_item in old_list: lowerCAmelCase_ = old_item lowerCAmelCase_ = new_item.replace("norm.weight" , "group_norm.weight" ) lowerCAmelCase_ = new_item.replace("norm.bias" , "group_norm.bias" ) lowerCAmelCase_ = new_item.replace("proj_out.weight" , "proj_attn.weight" ) lowerCAmelCase_ = new_item.replace("proj_out.bias" , "proj_attn.bias" ) lowerCAmelCase_ = shave_segments(__a , n_shave_prefix_segments=__a ) mapping.append({"old": old_item, "new": new_item} ) return mapping def A(__a: Optional[int] , __a: Any , __a: str , __a: List[Any]=None , __a: List[Any]=None , __a: Union[str, Any]=None ): assert isinstance(__a , __a ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): lowerCAmelCase_ = old_checkpoint[path] lowerCAmelCase_ = old_tensor.shape[0] // 3 lowerCAmelCase_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) lowerCAmelCase_ = old_tensor.shape[0] // config["num_head_channels"] // 3 lowerCAmelCase_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = old_tensor.split(channels // num_heads , dim=1 ) lowerCAmelCase_ = query.reshape(__a ) lowerCAmelCase_ = key.reshape(__a ) lowerCAmelCase_ = value.reshape(__a ) for path in paths: lowerCAmelCase_ = path["new"] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here lowerCAmelCase_ = new_path.replace("middle_block.0" , "mid_block.resnets.0" ) lowerCAmelCase_ = new_path.replace("middle_block.1" , "mid_block.attentions.0" ) lowerCAmelCase_ = new_path.replace("middle_block.2" , "mid_block.resnets.1" ) if additional_replacements is not None: for replacement in additional_replacements: lowerCAmelCase_ = new_path.replace(replacement["old"] , replacement["new"] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: lowerCAmelCase_ = old_checkpoint[path["old"]][:, :, 0] else: lowerCAmelCase_ = old_checkpoint[path["old"]] def A(__a: Tuple , __a: str ): lowerCAmelCase_ = {} lowerCAmelCase_ = checkpoint["time_embed.0.weight"] lowerCAmelCase_ = checkpoint["time_embed.0.bias"] lowerCAmelCase_ = checkpoint["time_embed.2.weight"] lowerCAmelCase_ = checkpoint["time_embed.2.bias"] lowerCAmelCase_ = checkpoint["input_blocks.0.0.weight"] lowerCAmelCase_ = checkpoint["input_blocks.0.0.bias"] lowerCAmelCase_ = checkpoint["out.0.weight"] lowerCAmelCase_ = checkpoint["out.0.bias"] lowerCAmelCase_ = checkpoint["out.2.weight"] lowerCAmelCase_ = checkpoint["out.2.bias"] # Retrieves the keys for the input blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the middle blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the output blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(__a ) } for i in range(1 , __a ): lowerCAmelCase_ = (i - 1) // (config["num_res_blocks"] + 1) lowerCAmelCase_ = (i - 1) % (config["num_res_blocks"] + 1) lowerCAmelCase_ = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key] lowerCAmelCase_ = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: lowerCAmelCase_ = checkpoint[ F"input_blocks.{i}.0.op.weight" ] lowerCAmelCase_ = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue lowerCAmelCase_ = renew_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"input_blocks.{i}.0", "new": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} lowerCAmelCase_ = {"old": "resnets.2.op", "new": "downsamplers.0.op"} assign_to_checkpoint( __a , __a , __a , additional_replacements=[meta_path, resnet_op] , config=__a ) if len(__a ): lowerCAmelCase_ = renew_attention_paths(__a ) lowerCAmelCase_ = { "old": F"input_blocks.{i}.1", "new": F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } lowerCAmelCase_ = { F"input_blocks.{i}.1.qkv.bias": { "key": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", "query": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", "value": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { "key": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", "query": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", "value": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( __a , __a , __a , additional_replacements=[meta_path] , attention_paths_to_split=__a , config=__a , ) lowerCAmelCase_ = middle_blocks[0] lowerCAmelCase_ = middle_blocks[1] lowerCAmelCase_ = middle_blocks[2] lowerCAmelCase_ = renew_resnet_paths(__a ) assign_to_checkpoint(__a , __a , __a , config=__a ) lowerCAmelCase_ = renew_resnet_paths(__a ) assign_to_checkpoint(__a , __a , __a , config=__a ) lowerCAmelCase_ = renew_attention_paths(__a ) lowerCAmelCase_ = { "middle_block.1.qkv.bias": { "key": "mid_block.attentions.0.key.bias", "query": "mid_block.attentions.0.query.bias", "value": "mid_block.attentions.0.value.bias", }, "middle_block.1.qkv.weight": { "key": "mid_block.attentions.0.key.weight", "query": "mid_block.attentions.0.query.weight", "value": "mid_block.attentions.0.value.weight", }, } assign_to_checkpoint( __a , __a , __a , attention_paths_to_split=__a , config=__a ) for i in range(__a ): lowerCAmelCase_ = i // (config["num_res_blocks"] + 1) lowerCAmelCase_ = i % (config["num_res_blocks"] + 1) lowerCAmelCase_ = [shave_segments(__a , 2 ) for name in output_blocks[i]] lowerCAmelCase_ = {} for layer in output_block_layers: lowerCAmelCase_ , lowerCAmelCase_ = layer.split("." )[0], shave_segments(__a , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__a ) else: lowerCAmelCase_ = [layer_name] if len(__a ) > 1: lowerCAmelCase_ = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] lowerCAmelCase_ = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] lowerCAmelCase_ = renew_resnet_paths(__a ) lowerCAmelCase_ = renew_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"output_blocks.{i}.0", "new": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) if ["conv.weight", "conv.bias"] in output_block_list.values(): lowerCAmelCase_ = list(output_block_list.values() ).index(["conv.weight", "conv.bias"] ) lowerCAmelCase_ = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] lowerCAmelCase_ = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(__a ) == 2: lowerCAmelCase_ = [] if len(__a ): lowerCAmelCase_ = renew_attention_paths(__a ) lowerCAmelCase_ = { "old": F"output_blocks.{i}.1", "new": F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } lowerCAmelCase_ = { F"output_blocks.{i}.1.qkv.bias": { "key": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", "query": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", "value": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { "key": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", "query": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", "value": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( __a , __a , __a , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("qkv" in key for key in attentions ) else None , config=__a , ) else: lowerCAmelCase_ = renew_resnet_paths(__a , n_shave_prefix_segments=1 ) for path in resnet_0_paths: lowerCAmelCase_ = ".".join(["output_blocks", str(__a ), path["old"]] ) lowerCAmelCase_ = ".".join(["up_blocks", str(__a ), "resnets", str(__a ), path["new"]] ) lowerCAmelCase_ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = torch.load(args.checkpoint_path) with open(args.config_file) as f: lowerCamelCase__ = json.loads(f.read()) lowerCamelCase__ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] lowerCamelCase__ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: lowerCamelCase__ = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) lowerCamelCase__ = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) lowerCamelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
22
def A(__a: Optional[Any] ): lowerCAmelCase_ = len(__a ) lowerCAmelCase_ = sum(__a ) lowerCAmelCase_ = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): lowerCAmelCase_ = True for i in range(1 , s + 1 ): lowerCAmelCase_ = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): lowerCAmelCase_ = dp[i][j - 1] if arr[i - 1] <= j: lowerCAmelCase_ = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: lowerCAmelCase_ = s - 2 * j break return diff
22
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: lowerCamelCase__ = None lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase__ = { '''vocab_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''', }, '''tokenizer_file''': { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''', }, } lowerCamelCase__ = { '''albert-base-v1''': 5_12, '''albert-large-v1''': 5_12, '''albert-xlarge-v1''': 5_12, '''albert-xxlarge-v1''': 5_12, '''albert-base-v2''': 5_12, '''albert-large-v2''': 5_12, '''albert-xlarge-v2''': 5_12, '''albert-xxlarge-v2''': 5_12, } lowerCamelCase__ = '''▁''' class __magic_name__ (__lowercase ): lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = AlbertTokenizer def __init__( self , _a=None , _a=None , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. lowerCAmelCase_ = ( AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a ) if isinstance(_a , _a ) else mask_token ) super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , ) lowerCAmelCase_ = do_lower_case lowerCAmelCase_ = remove_space lowerCAmelCase_ = keep_accents lowerCAmelCase_ = vocab_file lowerCAmelCase_ = False if not self.vocab_file else True def __a ( self , _a , _a = None ) -> List[int]: lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __a ( self , _a , _a = None ) -> List[int]: lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _a , _a = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(_a ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCAmelCase_ = os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
22
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
22
1
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class __magic_name__ (__lowercase ): def __init__( self , _a , _a , _a = None , _a = None , _a = False , **_a , ) -> Any: super().__init__(features=_a , cache_dir=_a , keep_in_memory=_a , **_a ) lowerCAmelCase_ = Sql( cache_dir=_a , features=_a , sql=_a , con=_a , **_a , ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None self.builder.download_and_prepare( download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , ) # Build dataset for splits lowerCAmelCase_ = self.builder.as_dataset( split="train" , verification_mode=_a , in_memory=self.keep_in_memory ) return dataset class __magic_name__ : def __init__( self , _a , _a , _a , _a = None , _a = None , **_a , ) -> List[str]: if num_proc is not None and num_proc <= 0: raise ValueError(f"num_proc {num_proc} must be an integer > 0." ) lowerCAmelCase_ = dataset lowerCAmelCase_ = name lowerCAmelCase_ = con lowerCAmelCase_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE lowerCAmelCase_ = num_proc lowerCAmelCase_ = to_sql_kwargs def __a ( self ) -> int: lowerCAmelCase_ = self.to_sql_kwargs.pop("sql" , _a ) lowerCAmelCase_ = self.to_sql_kwargs.pop("con" , _a ) lowerCAmelCase_ = self.to_sql_kwargs.pop("index" , _a ) lowerCAmelCase_ = self._write(index=_a , **self.to_sql_kwargs ) return written def __a ( self , _a ) -> Optional[int]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = args lowerCAmelCase_ = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs lowerCAmelCase_ = query_table( table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , ) lowerCAmelCase_ = batch.to_pandas() lowerCAmelCase_ = df.to_sql(self.name , self.con , index=_a , **_a ) return num_rows or len(_a ) def __a ( self , _a , **_a ) -> int: lowerCAmelCase_ = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: lowerCAmelCase_ , lowerCAmelCase_ = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ): written += num_rows return written
22
import re from filelock import FileLock try: import nltk lowerCamelCase__ = True except (ImportError, ModuleNotFoundError): lowerCamelCase__ = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def A(__a: str ): re.sub("<n>" , "" , __a ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__a ) )
22
1
from __future__ import annotations def A(__a: str , __a: list[str] | None = None ): lowerCAmelCase_ = word_bank or [] # create a table lowerCAmelCase_ = len(__a ) + 1 lowerCAmelCase_ = [] for _ in range(__a ): table.append([] ) # seed value lowerCAmelCase_ = [[]] # because empty string has empty combination # iterate through the indices for i in range(__a ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(__a )] == word: lowerCAmelCase_ = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(__a )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(__a )]: combination.reverse() return table[len(__a )] if __name__ == "__main__": print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa'''])) print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t'''])) print( all_construct( '''hexagonosaurus''', ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''], ) )
22
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCamelCase__ = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''albert''' def __init__( self , _a=30000 , _a=128 , _a=4096 , _a=12 , _a=1 , _a=64 , _a=16384 , _a=1 , _a="gelu_new" , _a=0 , _a=0 , _a=512 , _a=2 , _a=0.0_2 , _a=1E-12 , _a=0.1 , _a="absolute" , _a=0 , _a=2 , _a=3 , **_a , ) -> int: super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = embedding_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_hidden_groups lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = inner_group_num lowerCAmelCase_ = hidden_act lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = type_vocab_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = position_embedding_type class __magic_name__ (__lowercase ): @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"} else: lowerCAmelCase_ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
22
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bertabs''' def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_pos lowerCAmelCase_ = enc_layers lowerCAmelCase_ = enc_hidden_size lowerCAmelCase_ = enc_heads lowerCAmelCase_ = enc_ff_size lowerCAmelCase_ = enc_dropout lowerCAmelCase_ = dec_layers lowerCAmelCase_ = dec_hidden_size lowerCAmelCase_ = dec_heads lowerCAmelCase_ = dec_ff_size lowerCAmelCase_ = dec_dropout
22
1
import copy import os import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np import pyarrow as pa import pyarrow.parquet as pq import pytest from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence from datasets.features import ArrayaD, ClassLabel, Features, Image, Value from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects from datasets.keyhash import DuplicatedKeysError, InvalidKeyError from .utils import require_pil class __magic_name__ (__lowercase ): def __a ( self ) -> List[str]: lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] ) ) self.assertEqual(arr.type , pa.intaa() ) def __a ( self ) -> int: with self.assertRaises(_a ): lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() ) def __a ( self ) -> Optional[int]: with self.assertRaises(_a ): lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("bool" ) , type=Value("int64" ) ) ) def __a ( self ) -> Any: lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , type=Value("int32" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def __a ( self ) -> Optional[int]: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , type=Value("int64" ) ) ) def __a ( self ) -> Dict: lowerCAmelCase_ = pa.array(TypedSequence([1, 2, 3] , try_type=Value("int32" ) ) ) self.assertEqual(arr.type , pa.intaa() ) def __a ( self ) -> int: lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , try_type=Value("int64" ) ) ) self.assertEqual(arr.type , pa.string() ) def __a ( self ) -> Tuple: lowerCAmelCase_ = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) ) def __a ( self ) -> Optional[Any]: with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ): lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , type=ArrayaD((1, 3) , "int64" ) ) ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , "int64" ) ) def __a ( self ) -> List[str]: lowerCAmelCase_ = pa.array(TypedSequence(["foo", "bar"] , try_type=ArrayaD((1, 3) , "int64" ) ) ) self.assertEqual(arr.type , pa.string() ) @require_pil def __a ( self ) -> int: import PIL.Image lowerCAmelCase_ = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) ) with patch( "datasets.arrow_writer.cast_to_python_objects" , side_effect=_a ) as mock_cast_to_python_objects: lowerCAmelCase_ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image] , type=Image() ) ) lowerCAmelCase_ , lowerCAmelCase_ = mock_cast_to_python_objects.call_args_list[-1] self.assertIn("optimize_list_casting" , _a ) self.assertFalse(kwargs["optimize_list_casting"] ) def A(__a: Optional[Any] , __a: int ): lowerCAmelCase_ = pa.BufferReader(__a ) if isinstance(__a , pa.Buffer ) else pa.memory_map(__a ) lowerCAmelCase_ = pa.ipc.open_stream(__a ) lowerCAmelCase_ = f.read_all() assert len(pa_table.to_batches() ) == expected_num_chunks assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} del pa_table @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def A(__a: int , __a: int ): lowerCAmelCase_ = pa.BufferOutputStream() lowerCAmelCase_ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def A(): lowerCAmelCase_ = pa.BufferOutputStream() lowerCAmelCase_ = Features({"labels": ClassLabel(names=["neg", "pos"] )} ) with ArrowWriter(stream=__a , features=__a ) as writer: writer.write({"labels": 0} ) writer.write({"labels": 1} ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == features.arrow_schema assert writer._schema.metadata == features.arrow_schema.metadata lowerCAmelCase_ = pa.BufferReader(output.getvalue() ) lowerCAmelCase_ = pa.ipc.open_stream(__a ) lowerCAmelCase_ = f.read_all() lowerCAmelCase_ = pa_table.schema assert pa_table.num_rows == 2 assert schema == features.arrow_schema assert schema.metadata == features.arrow_schema.metadata assert features == Features.from_arrow_schema(__a ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) def A(__a: int ): lowerCAmelCase_ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() @pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] ) def A(__a: Any ): lowerCAmelCase_ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer: with pytest.raises(__a ): writer.write({"col_1": "foo", "col_2": 1} , key=10 ) writer.write({"col_1": "bar", "col_2": 2} , key=10 ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() @pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] ) def A(__a: int ): lowerCAmelCase_ = pa.BufferOutputStream() with ArrowWriter( stream=__a , writer_batch_size=__a , hash_salt="split_name" , check_duplicates=__a , ) as writer: writer.write({"col_1": "foo", "col_2": 1} , key=1 ) writer.write({"col_1": "bar", "col_2": 2} , key=2 ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def A(__a: List[str] , __a: int ): lowerCAmelCase_ = pa.BufferOutputStream() lowerCAmelCase_ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) writer.write_batch({"col_1": [], "col_2": []} ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def A(__a: Union[str, Any] , __a: List[Any] ): lowerCAmelCase_ = pa.BufferOutputStream() lowerCAmelCase_ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) @pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] ) @pytest.mark.parametrize( "fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] ) def A(__a: Dict , __a: Dict ): lowerCAmelCase_ = pa.BufferOutputStream() lowerCAmelCase_ = pa.schema(__a ) if fields else None with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer: writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) ) writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 if not fields: lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()} assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 ) def A(): with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ = {"col_1": pa.string(), "col_2": pa.intaa()} lowerCAmelCase_ = os.path.join(__a , "test.arrow" ) with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer: writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata ) _check_output(__a , 1 ) def A(__a: str ): if pa.types.is_list(__a ): return get_base_dtype(arr_type.value_type ) else: return arr_type def A(__a: Tuple , __a: Any ): if isinstance(lst[0] , __a ): change_first_primitive_element_in_list(lst[0] , __a ) else: lowerCAmelCase_ = value @pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] ) @pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def A(__a: Any , __a: str , __a: int ): lowerCAmelCase_ = pa.array(TypedSequence(__a , optimized_int_type=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype @pytest.mark.parametrize( "col, expected_dtype" , [ ("attention_mask", pa.inta()), ("special_tokens_mask", pa.inta()), ("token_type_ids", pa.inta()), ("input_ids", pa.intaa()), ("other", pa.intaa()), ] , ) @pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] ) def A(__a: Tuple , __a: str , __a: Optional[int] ): # in range lowerCAmelCase_ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == expected_dtype # not in range if col != "other": # avoids errors due to in-place modifications lowerCAmelCase_ = copy.deepcopy(__a ) lowerCAmelCase_ = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1 change_first_primitive_element_in_list(__a , __a ) lowerCAmelCase_ = pa.array(OptimizedTypedSequence(__a , col=__a ) ) assert get_base_dtype(arr.type ) == pa.intaa() @pytest.mark.parametrize("raise_exception" , [False, True] ) def A(__a: Dict , __a: List[str] ): lowerCAmelCase_ = str(tmp_path / "dataset-train.arrow" ) try: with ArrowWriter(path=__a ) as writer: if raise_exception: raise pa.lib.ArrowInvalid() else: writer.stream.close() except pa.lib.ArrowInvalid: pass finally: assert writer.stream.closed def A(__a: Union[str, Any] ): lowerCAmelCase_ = "mock://dataset-train.arrow" with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer: assert isinstance(writer._fs , type(__a ) ) assert writer._fs.storage_options == mockfs.storage_options writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 assert mockfs.exists(__a ) def A(): lowerCAmelCase_ = pa.BufferOutputStream() with ParquetWriter(stream=__a ) as writer: writer.write({"col_1": "foo", "col_2": 1} ) writer.write({"col_1": "bar", "col_2": 2} ) lowerCAmelCase_ , lowerCAmelCase_ = writer.finalize() assert num_examples == 2 assert num_bytes > 0 lowerCAmelCase_ = pa.BufferReader(output.getvalue() ) lowerCAmelCase_ = pq.read_table(__a ) assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]} @require_pil @pytest.mark.parametrize("embed_local_files" , [False, True] ) def A(__a: Tuple , __a: Dict ): import PIL.Image lowerCAmelCase_ = str(tmp_path / "test_image_rgb.jpg" ) PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="png" ) lowerCAmelCase_ = pa.BufferOutputStream() with ParquetWriter( stream=__a , features=Features({"image": Image()} ) , embed_local_files=__a ) as writer: writer.write({"image": image_path} ) writer.finalize() lowerCAmelCase_ = pa.BufferReader(output.getvalue() ) lowerCAmelCase_ = pq.read_table(__a ) lowerCAmelCase_ = pa_table.to_pydict() if embed_local_files: assert isinstance(out["image"][0]["path"] , __a ) with open(__a , "rb" ) as f: assert out["image"][0]["bytes"] == f.read() else: assert out["image"][0]["path"] == image_path assert out["image"][0]["bytes"] is None def A(): lowerCAmelCase_ = pa.schema([pa.field("col_1" , pa.string() , nullable=__a )] ) lowerCAmelCase_ = pa.BufferOutputStream() with ArrowWriter(stream=__a ) as writer: writer._build_writer(inferred_schema=__a ) assert writer._schema == pa.schema([pa.field("col_1" , pa.string() )] )
22
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
22
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''deta''' lowerCamelCase__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , _a=None , _a=900 , _a=2048 , _a=6 , _a=2048 , _a=8 , _a=6 , _a=1024 , _a=8 , _a=0.0 , _a=True , _a="relu" , _a=256 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.0_2 , _a=1.0 , _a=True , _a=False , _a="sine" , _a=5 , _a=4 , _a=4 , _a=True , _a=300 , _a=True , _a=True , _a=1 , _a=5 , _a=2 , _a=1 , _a=1 , _a=5 , _a=2 , _a=0.1 , _a=0.2_5 , **_a , ) -> List[Any]: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) lowerCAmelCase_ = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] ) else: if isinstance(_a , _a ): lowerCAmelCase_ = backbone_config.pop("model_type" ) lowerCAmelCase_ = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase_ = config_class.from_dict(_a ) lowerCAmelCase_ = backbone_config lowerCAmelCase_ = num_queries lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = d_model lowerCAmelCase_ = encoder_ffn_dim lowerCAmelCase_ = encoder_layers lowerCAmelCase_ = encoder_attention_heads lowerCAmelCase_ = decoder_ffn_dim lowerCAmelCase_ = decoder_layers lowerCAmelCase_ = decoder_attention_heads lowerCAmelCase_ = dropout lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = activation_dropout lowerCAmelCase_ = activation_function lowerCAmelCase_ = init_std lowerCAmelCase_ = init_xavier_std lowerCAmelCase_ = encoder_layerdrop lowerCAmelCase_ = auxiliary_loss lowerCAmelCase_ = position_embedding_type # deformable attributes lowerCAmelCase_ = num_feature_levels lowerCAmelCase_ = encoder_n_points lowerCAmelCase_ = decoder_n_points lowerCAmelCase_ = two_stage lowerCAmelCase_ = two_stage_num_proposals lowerCAmelCase_ = with_box_refine lowerCAmelCase_ = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError("If two_stage is True, with_box_refine must be True." ) # Hungarian matcher lowerCAmelCase_ = class_cost lowerCAmelCase_ = bbox_cost lowerCAmelCase_ = giou_cost # Loss coefficients lowerCAmelCase_ = mask_loss_coefficient lowerCAmelCase_ = dice_loss_coefficient lowerCAmelCase_ = bbox_loss_coefficient lowerCAmelCase_ = giou_loss_coefficient lowerCAmelCase_ = eos_coefficient lowerCAmelCase_ = focal_alpha super().__init__(is_encoder_decoder=_a , **_a ) @property def __a ( self ) -> int: return self.encoder_attention_heads @property def __a ( self ) -> int: return self.d_model def __a ( self ) -> Tuple: lowerCAmelCase_ = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ = self.backbone_config.to_dict() lowerCAmelCase_ = self.__class__.model_type return output
22
def A(): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] lowerCamelCase__ = generate_large_matrix() lowerCamelCase__ = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A(__a: list[list[int]] ): assert all(row == sorted(__a , reverse=__a ) for row in grid ) assert all(list(__a ) == sorted(__a , reverse=__a ) for col in zip(*__a ) ) def A(__a: list[int] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(__a ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCAmelCase_ = (left + right) // 2 lowerCAmelCase_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCAmelCase_ = mid + 1 else: lowerCAmelCase_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__a ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(grid[0] ) for i in range(len(__a ) ): lowerCAmelCase_ = find_negative_index(grid[i][:bound] ) total += bound return (len(__a ) * len(grid[0] )) - total def A(__a: list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 for row in grid: for i, number in enumerate(__a ): if number < 0: total += len(__a ) - i break return total def A(): from timeit import timeit print("Running benchmarks" ) lowerCAmelCase_ = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCAmelCase_ = timeit(F"{func}(grid=grid)" , setup=__a , number=500 ) print(F"{func}() took {time:0.4f} seconds" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
22
1
from __future__ import annotations def A(__a: list[float] , __a: list[float] ): lowerCAmelCase_ = sorted(numsa + numsa ) lowerCAmelCase_ , lowerCAmelCase_ = divmod(len(__a ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase__ = [float(x) for x in input('''Enter the elements of first array: ''').split()] lowerCamelCase__ = [float(x) for x in input('''Enter the elements of second array: ''').split()] print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
22
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
22
1
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
22
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class __magic_name__ : @property def __a ( self ) -> int: return self.get_dummy_input() @property def __a ( self ) -> str: if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." ) def __a ( self , _a=True , _a=False , _a=False , _a=False , ) -> List[Any]: lowerCAmelCase_ = 4 lowerCAmelCase_ = 32 lowerCAmelCase_ = (32, 32) lowerCAmelCase_ = torch.manual_seed(0 ) lowerCAmelCase_ = torch.device(_a ) lowerCAmelCase_ = (batch_size, num_channels) + sizes lowerCAmelCase_ = randn_tensor(_a , generator=_a , device=_a ) lowerCAmelCase_ = {"hidden_states": hidden_states} if include_temb: lowerCAmelCase_ = 128 lowerCAmelCase_ = randn_tensor((batch_size, temb_channels) , generator=_a , device=_a ) if include_res_hidden_states_tuple: lowerCAmelCase_ = torch.manual_seed(1 ) lowerCAmelCase_ = (randn_tensor(_a , generator=_a , device=_a ),) if include_encoder_hidden_states: lowerCAmelCase_ = floats_tensor((batch_size, 32, 32) ).to(_a ) if include_skip_sample: lowerCAmelCase_ = randn_tensor(((batch_size, 3) + sizes) , generator=_a , device=_a ) return dummy_input def __a ( self ) -> List[str]: lowerCAmelCase_ = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": lowerCAmelCase_ = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) lowerCAmelCase_ = self.dummy_input return init_dict, inputs_dict def __a ( self , _a ) -> Optional[int]: lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_init_args_and_inputs_for_common() lowerCAmelCase_ = self.block_class(**_a ) unet_block.to(_a ) unet_block.eval() with torch.no_grad(): lowerCAmelCase_ = unet_block(**_a ) if isinstance(_a , _a ): lowerCAmelCase_ = output[0] self.assertEqual(output.shape , self.output_shape ) lowerCAmelCase_ = output[0, -1, -3:, -3:] lowerCAmelCase_ = torch.tensor(_a ).to(_a ) assert torch_all_close(output_slice.flatten() , _a , atol=5E-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def __a ( self ) -> List[Any]: lowerCAmelCase_ , lowerCAmelCase_ = self.prepare_init_args_and_inputs_for_common() lowerCAmelCase_ = self.block_class(**_a ) model.to(_a ) model.train() lowerCAmelCase_ = model(**_a ) if isinstance(_a , _a ): lowerCAmelCase_ = output[0] lowerCAmelCase_ = torch.device(_a ) lowerCAmelCase_ = randn_tensor(output.shape , device=_a ) lowerCAmelCase_ = torch.nn.functional.mse_loss(_a , _a ) loss.backward()
22
import math def A(__a: int ): return math.sqrt(__a ) * math.sqrt(__a ) == num def A(__a: int ): lowerCAmelCase_ = 0 lowerCAmelCase_ = n while left <= right: lowerCAmelCase_ = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: lowerCAmelCase_ = mid - 1 else: lowerCAmelCase_ = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
22
1
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging lowerCamelCase__ = logging.get_logger(__name__) class __magic_name__ : lowerCamelCase__ = 42 lowerCamelCase__ = None @staticmethod def __a ( ) -> List[Any]: raise NotImplementedError def __a ( self , _a , _a , _a , **_a ) -> int: raise NotImplementedError def __a ( self , _a ) -> str: raise NotImplementedError def __a ( self ) -> Optional[Any]: if not self.is_available(): raise RuntimeError( f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." ) @classmethod def __a ( cls ) -> Union[str, Any]: return f"`pip install {cls.pip_package or cls.name}`" class __magic_name__ (__lowercase ): lowerCamelCase__ = '''optuna''' @staticmethod def __a ( ) -> Union[str, Any]: return is_optuna_available() def __a ( self , _a , _a , _a , **_a ) -> Any: return run_hp_search_optuna(_a , _a , _a , **_a ) def __a ( self , _a ) -> int: return default_hp_space_optuna(_a ) class __magic_name__ (__lowercase ): lowerCamelCase__ = '''ray''' lowerCamelCase__ = '''\'ray[tune]\'''' @staticmethod def __a ( ) -> str: return is_ray_available() def __a ( self , _a , _a , _a , **_a ) -> Any: return run_hp_search_ray(_a , _a , _a , **_a ) def __a ( self , _a ) -> Dict: return default_hp_space_ray(_a ) class __magic_name__ (__lowercase ): lowerCamelCase__ = '''sigopt''' @staticmethod def __a ( ) -> Union[str, Any]: return is_sigopt_available() def __a ( self , _a , _a , _a , **_a ) -> str: return run_hp_search_sigopt(_a , _a , _a , **_a ) def __a ( self , _a ) -> str: return default_hp_space_sigopt(_a ) class __magic_name__ (__lowercase ): lowerCamelCase__ = '''wandb''' @staticmethod def __a ( ) -> Dict: return is_wandb_available() def __a ( self , _a , _a , _a , **_a ) -> Union[str, Any]: return run_hp_search_wandb(_a , _a , _a , **_a ) def __a ( self , _a ) -> Union[str, Any]: return default_hp_space_wandb(_a ) lowerCamelCase__ = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def A(): lowerCAmelCase_ = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(__a ) > 0: lowerCAmelCase_ = available_backends[0].name if len(__a ) > 1: logger.info( F"{len(__a )} hyperparameter search backends available. Using {name} as the default." ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( F" - To install {backend.name} run {backend.pip_install()}" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
22
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def A(__a: Dict , __a: List[str]=None ): require_version(deps[pkg] , __a )
22
1
import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __magic_name__ (unittest.TestCase ): @slow def __a ( self ) -> Dict: lowerCAmelCase_ = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" ) lowerCAmelCase_ = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" ) model.to(_a ) from datasets import load_dataset lowerCAmelCase_ = load_dataset("nielsr/rvlcdip-demo" ) lowerCAmelCase_ = dataset["train"][0]["image"].convert("RGB" ) lowerCAmelCase_ = image_processor(_a , return_tensors="pt" ).to(_a ) # forward pass with torch.no_grad(): lowerCAmelCase_ = model(**_a ) lowerCAmelCase_ = outputs.logits lowerCAmelCase_ = torch.Size((1, 16) ) self.assertEqual(logits.shape , _a ) lowerCAmelCase_ = torch.tensor( [-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=_a , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _a , atol=1E-4 ) )
22
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = ''' Hello world! cécé herlolip''' lowerCamelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def A(__a: Any ): lowerCAmelCase_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ): lowerCAmelCase_ = dct.pop(__a ) lowerCAmelCase_ = val def A(__a: Tuple ): lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def A(__a: List[str] ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a ) lowerCAmelCase_ = emb.weight.data return lin_layer @torch.no_grad() def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ): if not os.path.exists(__a ): lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval() else: lowerCAmelCase_ = load_xsum_checkpoint(__a ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCAmelCase_ = checkpoint_path.replace("." , "-" ) lowerCAmelCase_ = BartConfig.from_pretrained(__a ) lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 ) lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(__a , __a ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": lowerCAmelCase_ = bart.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(__a , __a , __a ) lowerCAmelCase_ = BartForSequenceClassification(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a ) lowerCAmelCase_ = model(__a )[0] # logits else: # no classification heads to worry about lowerCAmelCase_ = bart.model.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"] lowerCAmelCase_ = bart.extract_features(__a ) if hf_checkpoint_name == "facebook/bart-large": lowerCAmelCase_ = BartModel(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = model(__a ).model[0] else: lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt model.model.load_state_dict(__a ) if hasattr(__a , "lm_head" ): lowerCAmelCase_ = make_linear_from_emb(model.model.shared ) lowerCAmelCase_ = model.model(__a )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
22
1
from typing import TYPE_CHECKING from ...utils import _LazyModule lowerCamelCase__ = {'''tokenization_byt5''': ['''ByT5Tokenizer''']} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = MobileBertTokenizer lowerCamelCase__ = MobileBertTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = filter_non_english lowerCamelCase__ = '''google/mobilebert-uncased''' def __a ( self ) -> Optional[Any]: super().setUp() lowerCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) lowerCAmelCase_ = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def __a ( self , _a ) -> Any: lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = "unwanted, running" return input_text, output_text def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] ) def __a ( self ) -> Tuple: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # With lower casing lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __a ( self ) -> Dict: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> List[str]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __a ( self ) -> Any: lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCAmelCase_ = {} for i, token in enumerate(_a ): lowerCAmelCase_ = i lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def __a ( self ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __a ( self ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __a ( self ) -> Dict: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def __a ( self ) -> Any: lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __a ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." lowerCAmelCase_ = tokenizer_r.encode_plus( _a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , ) lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False lowerCAmelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = ["的", "人", "有"] lowerCAmelCase_ = "".join(_a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = False lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase_ = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(_a ) ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a )
22
1
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCamelCase__ = TypeVar('''KEY''') lowerCamelCase__ = TypeVar('''VAL''') @dataclass(frozen=__lowercase , slots=__lowercase ) class __magic_name__ (Generic[KEY, VAL] ): lowerCamelCase__ = 42 lowerCamelCase__ = 42 class __magic_name__ (_Item ): def __init__( self ) -> None: super().__init__(_a , _a ) def __bool__( self ) -> bool: return False lowerCamelCase__ = _DeletedItem() class __magic_name__ (MutableMapping[KEY, VAL] ): def __init__( self , _a = 8 , _a = 0.7_5 ) -> None: lowerCAmelCase_ = initial_block_size lowerCAmelCase_ = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 lowerCAmelCase_ = capacity_factor lowerCAmelCase_ = 0 def __a ( self , _a ) -> int: return hash(_a ) % len(self._buckets ) def __a ( self , _a ) -> int: return (ind + 1) % len(self._buckets ) def __a ( self , _a , _a , _a ) -> bool: lowerCAmelCase_ = self._buckets[ind] if not stored: lowerCAmelCase_ = _Item(_a , _a ) self._len += 1 return True elif stored.key == key: lowerCAmelCase_ = _Item(_a , _a ) return True else: return False def __a ( self ) -> bool: lowerCAmelCase_ = len(self._buckets ) * self._capacity_factor return len(self ) >= int(_a ) def __a ( self ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False lowerCAmelCase_ = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def __a ( self , _a ) -> None: lowerCAmelCase_ = self._buckets lowerCAmelCase_ = [None] * new_size lowerCAmelCase_ = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def __a ( self ) -> None: self._resize(len(self._buckets ) * 2 ) def __a ( self ) -> None: self._resize(len(self._buckets ) // 2 ) def __a ( self , _a ) -> Iterator[int]: lowerCAmelCase_ = self._get_bucket_index(_a ) for _ in range(len(self._buckets ) ): yield ind lowerCAmelCase_ = self._get_next_ind(_a ) def __a ( self , _a , _a ) -> None: for ind in self._iterate_buckets(_a ): if self._try_set(_a , _a , _a ): break def __setitem__( self , _a , _a ) -> None: if self._is_full(): self._size_up() self._add_item(_a , _a ) def __delitem__( self , _a ) -> None: for ind in self._iterate_buckets(_a ): lowerCAmelCase_ = self._buckets[ind] if item is None: raise KeyError(_a ) if item is _deleted: continue if item.key == key: lowerCAmelCase_ = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , _a ) -> VAL: for ind in self._iterate_buckets(_a ): lowerCAmelCase_ = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(_a ) def __len__( self ) -> int: return self._len def __iter__( self ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self ) -> str: lowerCAmelCase_ = " ,".join( f"{item.key}: {item.val}" for item in self._buckets if item ) return f"HashMap({val_string})"
22
import math from collections.abc import Iterator from itertools import takewhile def A(__a: int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A(): lowerCAmelCase_ = 2 while True: if is_prime(__a ): yield num num += 1 def A(__a: int = 200_0000 ): return sum(takewhile(lambda __a : x < n , prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
22
1
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
22
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''mobilenet_v2''' def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict: super().__init__(**_a ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = depth_multiplier lowerCAmelCase_ = depth_divisible_by lowerCAmelCase_ = min_depth lowerCAmelCase_ = expand_ratio lowerCAmelCase_ = output_stride lowerCAmelCase_ = first_layer_is_expansion lowerCAmelCase_ = finegrained_output lowerCAmelCase_ = hidden_act lowerCAmelCase_ = tf_padding lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = semantic_loss_ignore_index class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.11''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self ) -> float: return 1E-4
22
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''mobilenet_v2''' def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict: super().__init__(**_a ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = depth_multiplier lowerCAmelCase_ = depth_divisible_by lowerCAmelCase_ = min_depth lowerCAmelCase_ = expand_ratio lowerCAmelCase_ = output_stride lowerCAmelCase_ = first_layer_is_expansion lowerCAmelCase_ = finegrained_output lowerCAmelCase_ = hidden_act lowerCAmelCase_ = tf_padding lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = semantic_loss_ignore_index class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.11''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self ) -> float: return 1E-4
22
from __future__ import annotations def A(__a: dict , __a: str ): lowerCAmelCase_ , lowerCAmelCase_ = set(__a ), [start] while stack: lowerCAmelCase_ = stack.pop() explored.add(__a ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__a ) return explored lowerCamelCase__ = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
22
1
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = ''' Hello world! cécé herlolip''' lowerCamelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def A(__a: Any ): lowerCAmelCase_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ): lowerCAmelCase_ = dct.pop(__a ) lowerCAmelCase_ = val def A(__a: Tuple ): lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def A(__a: List[str] ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a ) lowerCAmelCase_ = emb.weight.data return lin_layer @torch.no_grad() def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ): if not os.path.exists(__a ): lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval() else: lowerCAmelCase_ = load_xsum_checkpoint(__a ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCAmelCase_ = checkpoint_path.replace("." , "-" ) lowerCAmelCase_ = BartConfig.from_pretrained(__a ) lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 ) lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(__a , __a ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": lowerCAmelCase_ = bart.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(__a , __a , __a ) lowerCAmelCase_ = BartForSequenceClassification(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a ) lowerCAmelCase_ = model(__a )[0] # logits else: # no classification heads to worry about lowerCAmelCase_ = bart.model.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"] lowerCAmelCase_ = bart.extract_features(__a ) if hf_checkpoint_name == "facebook/bart-large": lowerCAmelCase_ = BartModel(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = model(__a ).model[0] else: lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt model.model.load_state_dict(__a ) if hasattr(__a , "lm_head" ): lowerCAmelCase_ = make_linear_from_emb(model.model.shared ) lowerCAmelCase_ = model.model(__a )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
22
def A(__a: Tuple ): lowerCAmelCase_ = len(__a ) while cur > 1: # Find the maximum number in arr lowerCAmelCase_ = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi lowerCAmelCase_ = arr[mi::-1] + arr[mi + 1 : len(__a )] # Reverse whole list lowerCAmelCase_ = arr[cur - 1 :: -1] + arr[cur : len(__a )] cur -= 1 return arr if __name__ == "__main__": lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCamelCase__ = [int(item) for item in user_input.split(''',''')] print(pancake_sort(unsorted))
22
1
import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 lowerCamelCase__ = 0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 lowerCamelCase__ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class __magic_name__ : def __init__( self ) -> Any: lowerCAmelCase_ = WATERMARK_BITS lowerCAmelCase_ = WatermarkEncoder() self.encoder.set_watermark("bits" , self.watermark ) def __a ( self , _a ) -> Union[str, Any]: # can't encode images that are smaller than 256 if images.shape[-1] < 256: return images lowerCAmelCase_ = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowerCAmelCase_ = [self.encoder.encode(_a , "dwtDct" ) for image in images] lowerCAmelCase_ = torch.from_numpy(np.array(_a ) ).permute(0 , 3 , 1 , 2 ) lowerCAmelCase_ = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 ) return images
22
import string from math import logaa def A(__a: str , __a: str ): lowerCAmelCase_ = document.translate( str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" ) lowerCAmelCase_ = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def A(__a: str , __a: str ): lowerCAmelCase_ = corpus.lower().translate( str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with '' lowerCAmelCase_ = corpus_without_punctuation.split("\n" ) lowerCAmelCase_ = term.lower() return (len([doc for doc in docs if term in doc] ), len(__a )) def A(__a: int , __a: int , __a: List[Any]=False ): if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ) , 3 ) def A(__a: int , __a: int ): return round(tf * idf , 3 )
22
1
def A(__a: int = 100 ): lowerCAmelCase_ = set() lowerCAmelCase_ = 0 lowerCAmelCase_ = n + 1 # maximum limit for a in range(2 , __a ): for b in range(2 , __a ): lowerCAmelCase_ = a**b # calculates the current power collect_powers.add(__a ) # adds the result to the set return len(__a ) if __name__ == "__main__": print('''Number of terms ''', solution(int(str(input()).strip())))
22
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
22
1
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __magic_name__ (__lowercase ): lowerCamelCase__ = '''Speech2TextFeatureExtractor''' lowerCamelCase__ = '''Speech2TextTokenizer''' def __init__( self , _a , _a ) -> List[str]: super().__init__(_a , _a ) lowerCAmelCase_ = self.feature_extractor lowerCAmelCase_ = False def __call__( self , *_a , **_a ) -> Any: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_a , **_a ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) lowerCAmelCase_ = kwargs.pop("raw_speech" ) else: lowerCAmelCase_ = kwargs.pop("audio" , _a ) lowerCAmelCase_ = kwargs.pop("sampling_rate" , _a ) lowerCAmelCase_ = kwargs.pop("text" , _a ) if len(_a ) > 0: lowerCAmelCase_ = args[0] lowerCAmelCase_ = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: lowerCAmelCase_ = self.feature_extractor(_a , *_a , sampling_rate=_a , **_a ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , **_a ) if text is None: return inputs elif audio is None: return encodings else: lowerCAmelCase_ = encodings["input_ids"] return inputs def __a ( self , *_a , **_a ) -> Any: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.decode(*_a , **_a ) @contextmanager def __a ( self ) -> Optional[int]: warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer yield lowerCAmelCase_ = self.feature_extractor lowerCAmelCase_ = False
22
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Tuple: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a ) if visual_prompt is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if images is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if visual_prompt is not None and images is not None: lowerCAmelCase_ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase_ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
22
1
def A(__a: int ): if not isinstance(__a , __a ): raise ValueError("multiplicative_persistence() only accepts integral values" ) if num < 0: raise ValueError("multiplicative_persistence() does not accept negative values" ) lowerCAmelCase_ = 0 lowerCAmelCase_ = str(__a ) while len(__a ) != 1: lowerCAmelCase_ = [int(__a ) for i in num_string] lowerCAmelCase_ = 1 for i in range(0 , len(__a ) ): total *= numbers[i] lowerCAmelCase_ = str(__a ) steps += 1 return steps def A(__a: int ): if not isinstance(__a , __a ): raise ValueError("additive_persistence() only accepts integral values" ) if num < 0: raise ValueError("additive_persistence() does not accept negative values" ) lowerCAmelCase_ = 0 lowerCAmelCase_ = str(__a ) while len(__a ) != 1: lowerCAmelCase_ = [int(__a ) for i in num_string] lowerCAmelCase_ = 0 for i in range(0 , len(__a ) ): total += numbers[i] lowerCAmelCase_ = str(__a ) steps += 1 return steps if __name__ == "__main__": import doctest doctest.testmod()
22
import datasets lowerCamelCase__ = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' lowerCamelCase__ = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' lowerCamelCase__ = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def A(__a: Dict , __a: Union[str, Any] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def __a ( self , _a , _a ) -> List[str]: return {"accuracy": simple_accuracy(_a , _a )}
22
1
import os from datetime import datetime as dt from github import Github lowerCamelCase__ = [ '''good first issue''', '''feature request''', '''wip''', ] def A(): lowerCAmelCase_ = Github(os.environ["GITHUB_TOKEN"] ) lowerCAmelCase_ = g.get_repo("huggingface/accelerate" ) lowerCAmelCase_ = repo.get_issues(state="open" ) for issue in open_issues: lowerCAmelCase_ = sorted([comment for comment in issue.get_comments()] , key=lambda __a : i.created_at , reverse=__a ) lowerCAmelCase_ = comments[0] if len(__a ) > 0 else None lowerCAmelCase_ = dt.utcnow() lowerCAmelCase_ = (current_time - issue.updated_at).days lowerCAmelCase_ = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state="closed" ) elif ( days_since_updated > 23 and days_since_creation >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Add stale comment issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) if __name__ == "__main__": main()
22
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
22
1
from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class __magic_name__ (__lowercase ): lowerCamelCase__ = DistilBertTokenizer lowerCamelCase__ = DistilBertTokenizerFast lowerCamelCase__ = True @slow def __a ( self ) -> Optional[int]: lowerCAmelCase_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
22
def A(__a: Optional[Any] ): lowerCAmelCase_ = len(__a ) lowerCAmelCase_ = sum(__a ) lowerCAmelCase_ = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): lowerCAmelCase_ = True for i in range(1 , s + 1 ): lowerCAmelCase_ = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): lowerCAmelCase_ = dp[i][j - 1] if arr[i - 1] <= j: lowerCAmelCase_ = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: lowerCAmelCase_ = s - 2 * j break return diff
22
1
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def A(__a: Dict[str, torch.Tensor] ): lowerCAmelCase_ = [] lowerCAmelCase_ = [] lowerCAmelCase_ = [] for rt in rc.restypes: lowerCAmelCase_ = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) lowerCAmelCase_ = {name: i for i, name in enumerate(__a )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) lowerCAmelCase_ = torch.tensor( __a , dtype=torch.intaa , device=protein["aatype"].device , ) lowerCAmelCase_ = torch.tensor( __a , dtype=torch.intaa , device=protein["aatype"].device , ) lowerCAmelCase_ = torch.tensor( __a , dtype=torch.floataa , device=protein["aatype"].device , ) lowerCAmelCase_ = protein["aatype"].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein lowerCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype] lowerCAmelCase_ = restype_atomaa_mask[protein_aatype] lowerCAmelCase_ = residx_atomaa_mask lowerCAmelCase_ = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back lowerCAmelCase_ = restype_atomaa_to_atomaa[protein_aatype] lowerCAmelCase_ = residx_atomaa_to_atomaa.long() # create the corresponding mask lowerCAmelCase_ = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device ) for restype, restype_letter in enumerate(rc.restypes ): lowerCAmelCase_ = rc.restype_atoa[restype_letter] lowerCAmelCase_ = rc.residue_atoms[restype_name] for atom_name in atom_names: lowerCAmelCase_ = rc.atom_order[atom_name] lowerCAmelCase_ = 1 lowerCAmelCase_ = restype_atomaa_mask[protein_aatype] lowerCAmelCase_ = residx_atomaa_mask return protein def A(__a: Dict[str, torch.Tensor] ): lowerCAmelCase_ = tree_map(lambda __a : torch.tensor(__a , device=batch["aatype"].device ) , __a , np.ndarray ) lowerCAmelCase_ = tensor_tree_map(lambda __a : np.array(__a ) , make_atomaa_masks(__a ) ) return out
22
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
22
1
from __future__ import annotations def A(__a: list[int] ): return len(set(__a ) ) == len(__a ) if __name__ == "__main__": import doctest doctest.testmod()
22
import re from filelock import FileLock try: import nltk lowerCamelCase__ = True except (ImportError, ModuleNotFoundError): lowerCamelCase__ = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def A(__a: str ): re.sub("<n>" , "" , __a ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__a ) )
22
1
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING lowerCamelCase__ = logging.get_logger(__name__) @add_end_docstrings(__lowercase ) class __magic_name__ (__lowercase ): def __init__( self , *_a , **_a ) -> Tuple: super().__init__(*_a , **_a ) requires_backends(self , "decord" ) self.check_model_type(_a ) def __a ( self , _a=None , _a=None , _a=None ) -> Dict: lowerCAmelCase_ = {} if frame_sampling_rate is not None: lowerCAmelCase_ = frame_sampling_rate if num_frames is not None: lowerCAmelCase_ = num_frames lowerCAmelCase_ = {} if top_k is not None: lowerCAmelCase_ = top_k return preprocess_params, {}, postprocess_params def __call__( self , _a , **_a ) -> Tuple: return super().__call__(_a , **_a ) def __a ( self , _a , _a=None , _a=1 ) -> List[str]: if num_frames is None: lowerCAmelCase_ = self.model.config.num_frames if video.startswith("http://" ) or video.startswith("https://" ): lowerCAmelCase_ = BytesIO(requests.get(_a ).content ) lowerCAmelCase_ = VideoReader(_a ) videoreader.seek(0 ) lowerCAmelCase_ = 0 lowerCAmelCase_ = num_frames * frame_sampling_rate - 1 lowerCAmelCase_ = np.linspace(_a , _a , num=_a , dtype=np.intaa ) lowerCAmelCase_ = videoreader.get_batch(_a ).asnumpy() lowerCAmelCase_ = list(_a ) lowerCAmelCase_ = self.image_processor(_a , return_tensors=self.framework ) return model_inputs def __a ( self , _a ) -> Any: lowerCAmelCase_ = self.model(**_a ) return model_outputs def __a ( self , _a , _a=5 ) -> Any: if top_k > self.model.config.num_labels: lowerCAmelCase_ = self.model.config.num_labels if self.framework == "pt": lowerCAmelCase_ = model_outputs.logits.softmax(-1 )[0] lowerCAmelCase_ , lowerCAmelCase_ = probs.topk(_a ) else: raise ValueError(f"Unsupported framework: {self.framework}" ) lowerCAmelCase_ = scores.tolist() lowerCAmelCase_ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_a , _a )]
22
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __magic_name__ : @staticmethod def __a ( *_a , **_a ) -> Tuple: pass @is_pipeline_test @require_vision @require_timm @require_torch class __magic_name__ (unittest.TestCase ): lowerCamelCase__ = MODEL_FOR_OBJECT_DETECTION_MAPPING def __a ( self , _a , _a , _a ) -> Union[str, Any]: lowerCAmelCase_ = ObjectDetectionPipeline(model=_a , image_processor=_a ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __a ( self , _a , _a ) -> Optional[Any]: lowerCAmelCase_ = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(_a ) , 0 ) for detected_object in outputs: self.assertEqual( _a , { "score": ANY(_a ), "label": ANY(_a ), "box": {"xmin": ANY(_a ), "ymin": ANY(_a ), "xmax": ANY(_a ), "ymax": ANY(_a )}, } , ) import datasets lowerCAmelCase_ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) lowerCAmelCase_ = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] lowerCAmelCase_ = object_detector(_a , threshold=0.0 ) self.assertEqual(len(_a ) , len(_a ) ) for outputs in batch_outputs: self.assertGreater(len(_a ) , 0 ) for detected_object in outputs: self.assertEqual( _a , { "score": ANY(_a ), "label": ANY(_a ), "box": {"xmin": ANY(_a ), "ymin": ANY(_a ), "xmax": ANY(_a ), "ymax": ANY(_a )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def __a ( self ) -> Union[str, Any]: pass @require_torch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = "hf-internal-testing/tiny-detr-mobilenetsv3" lowerCAmelCase_ = AutoModelForObjectDetection.from_pretrained(_a ) lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(_a ) lowerCAmelCase_ = ObjectDetectionPipeline(model=_a , feature_extractor=_a ) lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ] , ) lowerCAmelCase_ = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ [ {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ] , ) @require_torch @slow def __a ( self ) -> int: lowerCAmelCase_ = "facebook/detr-resnet-50" lowerCAmelCase_ = AutoModelForObjectDetection.from_pretrained(_a ) lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained(_a ) lowerCAmelCase_ = ObjectDetectionPipeline(model=_a , feature_extractor=_a ) lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) lowerCAmelCase_ = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __a ( self ) -> Tuple: lowerCAmelCase_ = "facebook/detr-resnet-50" lowerCAmelCase_ = pipeline("object-detection" , model=_a ) lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) lowerCAmelCase_ = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ] , ) @require_torch @slow def __a ( self ) -> List[str]: lowerCAmelCase_ = 0.9_9_8_5 lowerCAmelCase_ = "facebook/detr-resnet-50" lowerCAmelCase_ = pipeline("object-detection" , model=_a ) lowerCAmelCase_ = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=_a ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ {"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ] , ) @require_torch @require_pytesseract @slow def __a ( self ) -> Tuple: lowerCAmelCase_ = "Narsil/layoutlmv3-finetuned-funsd" lowerCAmelCase_ = 0.9_9_9_3 lowerCAmelCase_ = pipeline("object-detection" , model=_a , threshold=_a ) lowerCAmelCase_ = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(_a , decimals=4 ) , [ {"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ] , )
22
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bertabs''' def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_pos lowerCAmelCase_ = enc_layers lowerCAmelCase_ = enc_hidden_size lowerCAmelCase_ = enc_heads lowerCAmelCase_ = enc_ff_size lowerCAmelCase_ = enc_dropout lowerCAmelCase_ = dec_layers lowerCAmelCase_ = dec_hidden_size lowerCAmelCase_ = dec_heads lowerCAmelCase_ = dec_ff_size lowerCAmelCase_ = dec_dropout
22
1
from collections.abc import Callable def A(__a: Callable[[float], float] , __a: float , __a: float ): lowerCAmelCase_ = a lowerCAmelCase_ = b if function(__a ) == 0: # one of the a or b is a root for the function return a elif function(__a ) == 0: return b elif ( function(__a ) * function(__a ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError("could not find root in given interval." ) else: lowerCAmelCase_ = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(__a ) == 0: return mid elif function(__a ) * function(__a ) < 0: lowerCAmelCase_ = mid else: lowerCAmelCase_ = mid lowerCAmelCase_ = start + (end - start) / 2.0 return mid def A(__a: float ): return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 10_00)) import doctest doctest.testmod()
22
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
22
1
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging lowerCamelCase__ = logging.get_logger(__name__) class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''input_features'''] def __init__( self , _a=80 , _a=16000 , _a=160 , _a=30 , _a=400 , _a=0.0 , _a=False , **_a , ) -> List[str]: super().__init__( feature_size=_a , sampling_rate=_a , padding_value=_a , return_attention_mask=_a , **_a , ) lowerCAmelCase_ = n_fft lowerCAmelCase_ = hop_length lowerCAmelCase_ = chunk_length lowerCAmelCase_ = chunk_length * sampling_rate lowerCAmelCase_ = self.n_samples // hop_length lowerCAmelCase_ = sampling_rate lowerCAmelCase_ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_a , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=_a , norm="slaney" , mel_scale="slaney" , ) def __a ( self , _a ) -> np.ndarray: lowerCAmelCase_ = spectrogram( _a , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) lowerCAmelCase_ = log_spec[:, :-1] lowerCAmelCase_ = np.maximum(_a , log_spec.max() - 8.0 ) lowerCAmelCase_ = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def __a ( _a , _a , _a = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: lowerCAmelCase_ = np.array(_a , np.intaa ) lowerCAmelCase_ = [] for vector, length in zip(_a , attention_mask.sum(-1 ) ): lowerCAmelCase_ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: lowerCAmelCase_ = padding_value normed_input_values.append(_a ) else: lowerCAmelCase_ = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self , _a , _a = True , _a = None , _a = None , _a = None , _a = "max_length" , _a = None , _a = None , _a = None , **_a , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) lowerCAmelCase_ = isinstance(_a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}" ) lowerCAmelCase_ = is_batched_numpy or ( isinstance(_a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCAmelCase_ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_a , np.ndarray ): lowerCAmelCase_ = np.asarray(_a , dtype=np.floataa ) elif isinstance(_a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCAmelCase_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCAmelCase_ = [np.asarray([raw_speech] ).T] lowerCAmelCase_ = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding lowerCAmelCase_ = self.pad( _a , padding=_a , max_length=max_length if max_length else self.n_samples , truncation=_a , pad_to_multiple_of=_a , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: lowerCAmelCase_ = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) lowerCAmelCase_ = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format lowerCAmelCase_ = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) lowerCAmelCase_ = [self._np_extract_fbank_features(_a ) for waveform in input_features[0]] if isinstance(input_features[0] , _a ): lowerCAmelCase_ = [np.asarray(_a , dtype=np.floataa ) for feature in input_features] else: lowerCAmelCase_ = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowerCAmelCase_ = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: lowerCAmelCase_ = padded_inputs.convert_to_tensors(_a ) return padded_inputs def __a ( self ) -> Dict[str, Any]: lowerCAmelCase_ = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
22
def A(): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] lowerCamelCase__ = generate_large_matrix() lowerCamelCase__ = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A(__a: list[list[int]] ): assert all(row == sorted(__a , reverse=__a ) for row in grid ) assert all(list(__a ) == sorted(__a , reverse=__a ) for col in zip(*__a ) ) def A(__a: list[int] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(__a ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCAmelCase_ = (left + right) // 2 lowerCAmelCase_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCAmelCase_ = mid + 1 else: lowerCAmelCase_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__a ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(grid[0] ) for i in range(len(__a ) ): lowerCAmelCase_ = find_negative_index(grid[i][:bound] ) total += bound return (len(__a ) * len(grid[0] )) - total def A(__a: list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 for row in grid: for i, number in enumerate(__a ): if number < 0: total += len(__a ) - i break return total def A(): from timeit import timeit print("Running benchmarks" ) lowerCAmelCase_ = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCAmelCase_ = timeit(F"{func}(grid=grid)" , setup=__a , number=500 ) print(F"{func}() took {time:0.4f} seconds" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
22
1
from __future__ import annotations def A(__a: dict , __a: str ): lowerCAmelCase_ , lowerCAmelCase_ = set(__a ), [start] while stack: lowerCAmelCase_ = stack.pop() explored.add(__a ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__a ) return explored lowerCamelCase__ = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
22
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
22
1
def A(__a: int ): if not isinstance(__a , __a ): lowerCAmelCase_ = F"Input value of [number={number}] must be an integer" raise TypeError(__a ) if number < 0: return False lowerCAmelCase_ = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
22
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class __magic_name__ (unittest.TestCase ): def __a ( self ) -> int: lowerCAmelCase_ = tempfile.mkdtemp() lowerCAmelCase_ = BlipImageProcessor() lowerCAmelCase_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) lowerCAmelCase_ = BlipProcessor(_a , _a ) processor.save_pretrained(self.tmpdirname ) def __a ( self , **_a ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer def __a ( self , **_a ) -> Optional[int]: return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor def __a ( self ) -> List[str]: shutil.rmtree(self.tmpdirname ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCAmelCase_ = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def __a ( self ) -> Any: lowerCAmelCase_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowerCAmelCase_ = self.get_image_processor(do_normalize=_a , padding_value=1.0 ) lowerCAmelCase_ = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.get_image_processor() lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = BlipProcessor(tokenizer=_a , image_processor=_a ) lowerCAmelCase_ = self.prepare_image_inputs() lowerCAmelCase_ = image_processor(_a , return_tensors="np" ) lowerCAmelCase_ = processor(images=_a , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.get_image_processor() lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = BlipProcessor(tokenizer=_a , image_processor=_a ) lowerCAmelCase_ = "lower newer" lowerCAmelCase_ = processor(text=_a ) lowerCAmelCase_ = tokenizer(_a , return_token_type_ids=_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.get_image_processor() lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = BlipProcessor(tokenizer=_a , image_processor=_a ) lowerCAmelCase_ = "lower newer" lowerCAmelCase_ = self.prepare_image_inputs() lowerCAmelCase_ = processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(_a ): processor() def __a ( self ) -> int: lowerCAmelCase_ = self.get_image_processor() lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = BlipProcessor(tokenizer=_a , image_processor=_a ) lowerCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase_ = processor.batch_decode(_a ) lowerCAmelCase_ = tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def __a ( self ) -> int: lowerCAmelCase_ = self.get_image_processor() lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = BlipProcessor(tokenizer=_a , image_processor=_a ) lowerCAmelCase_ = "lower newer" lowerCAmelCase_ = self.prepare_image_inputs() lowerCAmelCase_ = processor(text=_a , images=_a ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
22
import math def A(__a: int ): return math.sqrt(__a ) * math.sqrt(__a ) == num def A(__a: int ): lowerCAmelCase_ = 0 lowerCAmelCase_ = n while left <= right: lowerCAmelCase_ = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: lowerCAmelCase_ = mid - 1 else: lowerCAmelCase_ = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
22
1
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def A(__a: Union[str, Any] ): if isinstance(__a , collections.abc.Iterable ): return x return (x, x) @require_tf class __magic_name__ : def __a ( self , _a , _a ) -> Union[str, Any]: pass def __a ( self ) -> str: pass def __a ( self ) -> Dict: pass def __a ( self , _a , _a , _a , _a , _a=None , **_a ) -> int: lowerCAmelCase_ = VisionTextDualEncoderConfig.from_vision_text_configs(_a , _a ) lowerCAmelCase_ = TFVisionTextDualEncoderModel(_a ) lowerCAmelCase_ = model(input_ids=_a , pixel_values=_a , attention_mask=_a ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def __a ( self , _a , _a , _a , _a , _a=None , **_a ) -> Tuple: lowerCAmelCase_ , lowerCAmelCase_ = self.get_vision_text_model(_a , _a ) lowerCAmelCase_ = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a ) lowerCAmelCase_ = model(input_ids=_a , pixel_values=_a , attention_mask=_a ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __a ( self , _a , _a , _a , _a , _a=None , **_a ) -> Optional[Any]: lowerCAmelCase_ , lowerCAmelCase_ = self.get_vision_text_model(_a , _a ) lowerCAmelCase_ = {"vision_model": vision_model, "text_model": text_model} lowerCAmelCase_ = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_a ) lowerCAmelCase_ = model(input_ids=_a , pixel_values=_a , attention_mask=_a ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __a ( self , _a , _a , _a , _a , _a=None , **_a ) -> List[Any]: lowerCAmelCase_ , lowerCAmelCase_ = self.get_vision_text_model(_a , _a ) lowerCAmelCase_ = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a ) lowerCAmelCase_ = model(input_ids=_a , pixel_values=_a , attention_mask=_a ) lowerCAmelCase_ = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_a ) lowerCAmelCase_ = TFVisionTextDualEncoderModel.from_pretrained(_a ) lowerCAmelCase_ = model(input_ids=_a , pixel_values=_a , attention_mask=_a ) lowerCAmelCase_ = after_output[0].numpy() lowerCAmelCase_ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_a , 1E-5 ) def __a ( self , _a , _a , _a , _a , _a=None , **_a ) -> str: lowerCAmelCase_ , lowerCAmelCase_ = self.get_vision_text_model(_a , _a ) lowerCAmelCase_ = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a ) lowerCAmelCase_ = model( input_ids=_a , pixel_values=_a , attention_mask=_a , output_attentions=_a ) lowerCAmelCase_ = output.vision_model_output.attentions self.assertEqual(len(_a ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase_ = to_atuple(vision_model.config.image_size ) lowerCAmelCase_ = to_atuple(vision_model.config.patch_size ) lowerCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase_ = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase_ = output.text_model_output.attentions self.assertEqual(len(_a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __a ( self , _a , _a , _a ) -> Optional[int]: lowerCAmelCase_ = np.abs((a - b) ).max() self.assertLessEqual(_a , _a , f"Difference between torch and flax is {diff} (>= {tol})." ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**_a ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**_a ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**_a ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.prepare_config_and_inputs() self.check_save_load(**_a ) def __a ( self ) -> Tuple: lowerCAmelCase_ = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**_a ) @slow def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ = self.get_pretrained_model_and_inputs() lowerCAmelCase_ = model_a(**_a ) lowerCAmelCase_ = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(_a ) lowerCAmelCase_ = TFVisionTextDualEncoderModel.from_pretrained(_a ) lowerCAmelCase_ = model_a(**_a ) lowerCAmelCase_ = after_outputs[0].numpy() lowerCAmelCase_ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_a , 1E-5 ) @require_tf class __magic_name__ (__lowercase , unittest.TestCase ): def __a ( self ) -> Dict: lowerCAmelCase_ = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" ) lowerCAmelCase_ = 13 lowerCAmelCase_ = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase_ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase_ = random_attention_mask([batch_size, 4] ) lowerCAmelCase_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __a ( self , _a , _a ) -> Dict: lowerCAmelCase_ = TFViTModel(_a , name="vision_model" ) lowerCAmelCase_ = TFBertModel(_a , name="text_model" ) return vision_model, text_model def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = TFViTModelTester(self ) lowerCAmelCase_ = TFBertModelTester(self ) lowerCAmelCase_ = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase_ = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = vision_config_and_inputs ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __magic_name__ (__lowercase , unittest.TestCase ): def __a ( self ) -> Union[str, Any]: # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. lowerCAmelCase_ = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" ) lowerCAmelCase_ = 13 lowerCAmelCase_ = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase_ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase_ = random_attention_mask([batch_size, 4] ) lowerCAmelCase_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __a ( self , _a , _a , _a , _a , _a=None , **_a ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ = self.get_vision_text_model(_a , _a ) lowerCAmelCase_ = TFVisionTextDualEncoderModel(vision_model=_a , text_model=_a ) lowerCAmelCase_ = model( input_ids=_a , pixel_values=_a , attention_mask=_a , output_attentions=_a ) lowerCAmelCase_ = output.vision_model_output.attentions self.assertEqual(len(_a ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) lowerCAmelCase_ = to_atuple(vision_model.config.image_size ) lowerCAmelCase_ = to_atuple(vision_model.config.patch_size ) lowerCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) lowerCAmelCase_ = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) lowerCAmelCase_ = output.text_model_output.attentions self.assertEqual(len(_a ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __a ( self , _a , _a ) -> Optional[Any]: lowerCAmelCase_ = TFDeiTModel(_a , name="vision_model" ) lowerCAmelCase_ = TFRobertaModel(_a , name="text_model" ) return vision_model, text_model def __a ( self ) -> Optional[int]: lowerCAmelCase_ = TFDeiTModelTester(self ) lowerCAmelCase_ = TFRobertaModelTester(self ) lowerCAmelCase_ = vit_model_tester.prepare_config_and_inputs() lowerCAmelCase_ = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = vision_config_and_inputs ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __magic_name__ (__lowercase , unittest.TestCase ): def __a ( self ) -> Dict: lowerCAmelCase_ = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" ) lowerCAmelCase_ = 13 lowerCAmelCase_ = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) lowerCAmelCase_ = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) lowerCAmelCase_ = random_attention_mask([batch_size, 4] ) lowerCAmelCase_ = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def __a ( self , _a , _a ) -> List[str]: lowerCAmelCase_ = TFCLIPVisionModel(_a , name="vision_model" ) lowerCAmelCase_ = TFBertModel(_a , name="text_model" ) return vision_model, text_model def __a ( self ) -> Dict: lowerCAmelCase_ = TFCLIPVisionModelTester(self ) lowerCAmelCase_ = TFBertModelTester(self ) lowerCAmelCase_ = clip_model_tester.prepare_config_and_inputs() lowerCAmelCase_ = bert_model_tester.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ = vision_config_and_inputs ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class __magic_name__ (unittest.TestCase ): @slow def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_a ) lowerCAmelCase_ = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) lowerCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) lowerCAmelCase_ = processor( text=["una foto di un gatto", "una foto di un cane"] , images=_a , padding=_a , return_tensors="np" ) lowerCAmelCase_ = model(**_a ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) lowerCAmelCase_ = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _a , atol=1E-3 ) )
22
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def A(__a: Dict , __a: List[str]=None ): require_version(deps[pkg] , __a )
22
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''} class __magic_name__ (__lowercase ): lowerCamelCase__ = '''openai-gpt''' lowerCamelCase__ = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , _a=40478 , _a=512 , _a=768 , _a=12 , _a=12 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=1E-5 , _a=0.0_2 , _a="cls_index" , _a=True , _a=None , _a=True , _a=0.1 , **_a , ) -> int: lowerCAmelCase_ = vocab_size lowerCAmelCase_ = n_positions lowerCAmelCase_ = n_embd lowerCAmelCase_ = n_layer lowerCAmelCase_ = n_head lowerCAmelCase_ = afn lowerCAmelCase_ = resid_pdrop lowerCAmelCase_ = embd_pdrop lowerCAmelCase_ = attn_pdrop lowerCAmelCase_ = layer_norm_epsilon lowerCAmelCase_ = initializer_range lowerCAmelCase_ = summary_type lowerCAmelCase_ = summary_use_proj lowerCAmelCase_ = summary_activation lowerCAmelCase_ = summary_first_dropout lowerCAmelCase_ = summary_proj_to_labels super().__init__(**_a )
22
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = ''' Hello world! cécé herlolip''' lowerCamelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def A(__a: Any ): lowerCAmelCase_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ): lowerCAmelCase_ = dct.pop(__a ) lowerCAmelCase_ = val def A(__a: Tuple ): lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def A(__a: List[str] ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a ) lowerCAmelCase_ = emb.weight.data return lin_layer @torch.no_grad() def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ): if not os.path.exists(__a ): lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval() else: lowerCAmelCase_ = load_xsum_checkpoint(__a ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCAmelCase_ = checkpoint_path.replace("." , "-" ) lowerCAmelCase_ = BartConfig.from_pretrained(__a ) lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 ) lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(__a , __a ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": lowerCAmelCase_ = bart.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(__a , __a , __a ) lowerCAmelCase_ = BartForSequenceClassification(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a ) lowerCAmelCase_ = model(__a )[0] # logits else: # no classification heads to worry about lowerCAmelCase_ = bart.model.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"] lowerCAmelCase_ = bart.extract_features(__a ) if hf_checkpoint_name == "facebook/bart-large": lowerCAmelCase_ = BartModel(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = model(__a ).model[0] else: lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt model.model.load_state_dict(__a ) if hasattr(__a , "lm_head" ): lowerCAmelCase_ = make_linear_from_emb(model.model.shared ) lowerCAmelCase_ = model.model(__a )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
22
1
lowerCamelCase__ = range(2, 20 + 1) lowerCamelCase__ = [10**k for k in range(ks[-1] + 1)] lowerCamelCase__ = {} def A(__a: Any , __a: Dict , __a: Any , __a: Dict ): lowerCAmelCase_ = sum(a_i[j] for j in range(__a , len(__a ) ) ) lowerCAmelCase_ = sum(a_i[j] * base[j] for j in range(min(len(__a ) , __a ) ) ) lowerCAmelCase_ , lowerCAmelCase_ = 0, 0 lowerCAmelCase_ = n - i lowerCAmelCase_ = memo.get(__a ) if sub_memo is not None: lowerCAmelCase_ = sub_memo.get(__a ) if jumps is not None and len(__a ) > 0: # find and make the largest jump without going over lowerCAmelCase_ = -1 for _k in range(len(__a ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: lowerCAmelCase_ = _k break if max_jump >= 0: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = jumps[max_jump] # since the difference between jumps is cached, add c lowerCAmelCase_ = diff + c for j in range(min(__a , len(__a ) ) ): lowerCAmelCase_ , lowerCAmelCase_ = divmod(__a , 10 ) if new_c > 0: add(__a , __a , __a ) else: lowerCAmelCase_ = [] else: lowerCAmelCase_ = {c: []} lowerCAmelCase_ = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps lowerCAmelCase_ , lowerCAmelCase_ = next_term(__a , k - 1 , i + dn , __a ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead lowerCAmelCase_ , lowerCAmelCase_ = compute(__a , __a , i + dn , __a ) diff += _diff dn += terms_jumped lowerCAmelCase_ = sub_memo[c] # keep jumps sorted by # of terms skipped lowerCAmelCase_ = 0 while j < len(__a ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(__a , (diff, dn, k) ) return (diff, dn) def A(__a: Dict , __a: List[Any] , __a: Tuple , __a: Tuple ): if i >= n: return 0, i if k > len(__a ): a_i.extend([0 for _ in range(k - len(__a ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) lowerCAmelCase_ = i lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0, 0, 0 for j in range(len(__a ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 lowerCAmelCase_ = ds_c + ds_b diff += addend lowerCAmelCase_ = 0 for j in range(__a ): lowerCAmelCase_ = a_i[j] + addend lowerCAmelCase_ , lowerCAmelCase_ = divmod(__a , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(__a , __a , __a ) return diff, i - start_i def A(__a: str , __a: int , __a: Union[str, Any] ): for j in range(__a , len(__a ) ): lowerCAmelCase_ = digits[j] + addend if s >= 10: lowerCAmelCase_ , lowerCAmelCase_ = divmod(__a , 10 ) lowerCAmelCase_ = addend // 10 + quotient else: lowerCAmelCase_ = s lowerCAmelCase_ = addend // 10 if addend == 0: break while addend > 0: lowerCAmelCase_ , lowerCAmelCase_ = divmod(__a , 10 ) digits.append(__a ) def A(__a: int = 10**15 ): lowerCAmelCase_ = [1] lowerCAmelCase_ = 1 lowerCAmelCase_ = 0 while True: lowerCAmelCase_ , lowerCAmelCase_ = next_term(__a , 20 , i + dn , __a ) dn += terms_jumped if dn == n - i: break lowerCAmelCase_ = 0 for j in range(len(__a ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F'''{solution() = }''')
22
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = MobileBertTokenizer lowerCamelCase__ = MobileBertTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = filter_non_english lowerCamelCase__ = '''google/mobilebert-uncased''' def __a ( self ) -> Optional[Any]: super().setUp() lowerCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) lowerCAmelCase_ = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def __a ( self , _a ) -> Any: lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = "unwanted, running" return input_text, output_text def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] ) def __a ( self ) -> Tuple: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # With lower casing lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __a ( self ) -> Dict: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> List[str]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __a ( self ) -> Any: lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCAmelCase_ = {} for i, token in enumerate(_a ): lowerCAmelCase_ = i lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def __a ( self ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __a ( self ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __a ( self ) -> Dict: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def __a ( self ) -> Any: lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __a ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." lowerCAmelCase_ = tokenizer_r.encode_plus( _a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , ) lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False lowerCAmelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = ["的", "人", "有"] lowerCAmelCase_ = "".join(_a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = False lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase_ = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(_a ) ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a )
22
1
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging lowerCamelCase__ = '''\ ''' lowerCamelCase__ = ''' Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity ''' lowerCamelCase__ = ''' Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to \'cuda\' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"] >>> results = perplexity.compute(model_id=\'gpt2\', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 78.22 >>> print(round(results["perplexities"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric("perplexity") >>> input_texts = datasets.load_dataset("wikitext", ... "wikitext-2-raw-v1", ... split="test")["text"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=\'\'] >>> results = perplexity.compute(model_id=\'gpt2\', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) [\'perplexities\', \'mean_perplexity\'] >>> print(round(results["mean_perplexity"], 2)) 60.35 >>> print(round(results["perplexities"][0], 2)) 81.12 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "input_texts": datasets.Value("string" ), } ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , ) def __a ( self , _a , _a , _a = 16 , _a = True , _a=None ) -> Any: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." if device == "gpu": lowerCAmelCase_ = "cuda" else: lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(_a ) lowerCAmelCase_ = model.to(_a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: lowerCAmelCase_ = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(_a ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" lowerCAmelCase_ = model.config.max_length - 1 else: lowerCAmelCase_ = model.config.max_length lowerCAmelCase_ = tokenizer( _a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , return_tensors="pt" , return_attention_mask=_a , ).to(_a ) lowerCAmelCase_ = encodings["input_ids"] lowerCAmelCase_ = encodings["attention_mask"] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." lowerCAmelCase_ = [] lowerCAmelCase_ = CrossEntropyLoss(reduction="none" ) for start_index in logging.tqdm(range(0 , len(_a ) , _a ) ): lowerCAmelCase_ = min(start_index + batch_size , len(_a ) ) lowerCAmelCase_ = encoded_texts[start_index:end_index] lowerCAmelCase_ = attn_masks[start_index:end_index] if add_start_token: lowerCAmelCase_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_a ) lowerCAmelCase_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) lowerCAmelCase_ = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(_a ), attn_mask] , dim=1 ) lowerCAmelCase_ = encoded_batch with torch.no_grad(): lowerCAmelCase_ = model(_a , attention_mask=_a ).logits lowerCAmelCase_ = out_logits[..., :-1, :].contiguous() lowerCAmelCase_ = labels[..., 1:].contiguous() lowerCAmelCase_ = attn_mask[..., 1:].contiguous() lowerCAmelCase_ = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , _a ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(_a )}
22
import math from collections.abc import Iterator from itertools import takewhile def A(__a: int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A(): lowerCAmelCase_ = 2 while True: if is_prime(__a ): yield num num += 1 def A(__a: int = 200_0000 ): return sum(takewhile(lambda __a : x < n , prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
22
1
from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def A(__a: Sequence[float] , __a: int , __a: int ): if not arr: return None, None, 0 if low == high: return low, high, arr[low] lowerCAmelCase_ = (low + high) // 2 lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = max_subarray(__a , __a , __a ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = max_subarray(__a , mid + 1 , __a ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = max_cross_sum(__a , __a , __a , __a ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def A(__a: Sequence[float] , __a: int , __a: int , __a: int ): lowerCAmelCase_ , lowerCAmelCase_ = float("-inf" ), -1 lowerCAmelCase_ , lowerCAmelCase_ = float("-inf" ), -1 lowerCAmelCase_ = 0 for i in range(__a , low - 1 , -1 ): summ += arr[i] if summ > left_sum: lowerCAmelCase_ = summ lowerCAmelCase_ = i lowerCAmelCase_ = 0 for i in range(mid + 1 , high + 1 ): summ += arr[i] if summ > right_sum: lowerCAmelCase_ = summ lowerCAmelCase_ = i return max_left, max_right, (left_sum + right_sum) def A(__a: int ): lowerCAmelCase_ = [randint(1 , __a ) for _ in range(__a )] lowerCAmelCase_ = time.time() max_subarray(__a , 0 , input_size - 1 ) lowerCAmelCase_ = time.time() return end - start def A(): lowerCAmelCase_ = [10, 100, 1000, 1_0000, 5_0000, 10_0000, 20_0000, 30_0000, 40_0000, 50_0000] lowerCAmelCase_ = [time_max_subarray(__a ) for input_size in input_sizes] print("No of Inputs\t\tTime Taken" ) for input_size, runtime in zip(__a , __a ): print(__a , "\t\t" , __a ) plt.plot(__a , __a ) plt.xlabel("Number of Inputs" ) plt.ylabel("Time taken in seconds" ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
22
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''mobilenet_v2''' def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict: super().__init__(**_a ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = depth_multiplier lowerCAmelCase_ = depth_divisible_by lowerCAmelCase_ = min_depth lowerCAmelCase_ = expand_ratio lowerCAmelCase_ = output_stride lowerCAmelCase_ = first_layer_is_expansion lowerCAmelCase_ = finegrained_output lowerCAmelCase_ = hidden_act lowerCAmelCase_ = tf_padding lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = semantic_loss_ignore_index class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.11''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self ) -> float: return 1E-4
22
1
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class __magic_name__ (unittest.TestCase ): def __init__( self , _a ) -> Any: lowerCAmelCase_ = parent def __a ( self ) -> Tuple: return {} def A(): lowerCAmelCase_ = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>" lowerCAmelCase_ = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n " return [html_string_a, html_string_a] @require_bsa class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = MarkupLMFeatureExtractor if is_bsa_available() else None def __a ( self ) -> List[str]: lowerCAmelCase_ = MarkupLMFeatureExtractionTester(self ) @property def __a ( self ) -> Optional[int]: return self.feature_extract_tester.prepare_feat_extract_dict() def __a ( self ) -> Dict: # Initialize feature_extractor lowerCAmelCase_ = self.feature_extraction_class() # Test not batched input lowerCAmelCase_ = get_html_strings()[0] lowerCAmelCase_ = feature_extractor(_a ) # fmt: off lowerCAmelCase_ = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]] lowerCAmelCase_ = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]] # fmt: on self.assertEqual(encoding.nodes , _a ) self.assertEqual(encoding.xpaths , _a ) # Test batched lowerCAmelCase_ = get_html_strings() lowerCAmelCase_ = feature_extractor(_a ) # fmt: off lowerCAmelCase_ = expected_nodes + [["My First Heading", "My first paragraph."]] lowerCAmelCase_ = expected_xpaths + [["/html/body/h1", "/html/body/p"]] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , _a ) self.assertEqual(encoding.xpaths , _a )
22
from __future__ import annotations def A(__a: dict , __a: str ): lowerCAmelCase_ , lowerCAmelCase_ = set(__a ), [start] while stack: lowerCAmelCase_ = stack.pop() explored.add(__a ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__a ) return explored lowerCamelCase__ = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
22
1
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
22
def A(__a: Tuple ): lowerCAmelCase_ = len(__a ) while cur > 1: # Find the maximum number in arr lowerCAmelCase_ = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi lowerCAmelCase_ = arr[mi::-1] + arr[mi + 1 : len(__a )] # Reverse whole list lowerCAmelCase_ = arr[cur - 1 :: -1] + arr[cur : len(__a )] cur -= 1 return arr if __name__ == "__main__": lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCamelCase__ = [int(item) for item in user_input.split(''',''')] print(pancake_sort(unsorted))
22
1
from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase__ = { '''vocab_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json''' }, '''merges_file''': { '''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt''' }, } lowerCamelCase__ = {'''allegro/herbert-base-cased''': 5_14} lowerCamelCase__ = {} class __magic_name__ (__lowercase ): lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ = HerbertTokenizer def __init__( self , _a=None , _a=None , _a=None , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a="</s>" , **_a , ) -> Union[str, Any]: super().__init__( _a , _a , tokenizer_file=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , sep_token=_a , **_a , ) def __a ( self , _a , _a = None ) -> List[int]: lowerCAmelCase_ = [self.cls_token_id] lowerCAmelCase_ = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __a ( self , _a , _a = None , _a = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1] def __a ( self , _a , _a = None ) -> List[int]: lowerCAmelCase_ = [self.sep_token_id] lowerCAmelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self , _a , _a = None ) -> Tuple[str]: lowerCAmelCase_ = self._tokenizer.model.save(_a , name=_a ) return tuple(_a )
22
import string from math import logaa def A(__a: str , __a: str ): lowerCAmelCase_ = document.translate( str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" ) lowerCAmelCase_ = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def A(__a: str , __a: str ): lowerCAmelCase_ = corpus.lower().translate( str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with '' lowerCAmelCase_ = corpus_without_punctuation.split("\n" ) lowerCAmelCase_ = term.lower() return (len([doc for doc in docs if term in doc] ), len(__a )) def A(__a: int , __a: int , __a: List[Any]=False ): if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ) , 3 ) def A(__a: int , __a: int ): return round(tf * idf , 3 )
22
1
def A(): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] lowerCamelCase__ = generate_large_matrix() lowerCamelCase__ = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A(__a: list[list[int]] ): assert all(row == sorted(__a , reverse=__a ) for row in grid ) assert all(list(__a ) == sorted(__a , reverse=__a ) for col in zip(*__a ) ) def A(__a: list[int] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(__a ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCAmelCase_ = (left + right) // 2 lowerCAmelCase_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCAmelCase_ = mid + 1 else: lowerCAmelCase_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__a ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(grid[0] ) for i in range(len(__a ) ): lowerCAmelCase_ = find_negative_index(grid[i][:bound] ) total += bound return (len(__a ) * len(grid[0] )) - total def A(__a: list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 for row in grid: for i, number in enumerate(__a ): if number < 0: total += len(__a ) - i break return total def A(): from timeit import timeit print("Running benchmarks" ) lowerCAmelCase_ = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCAmelCase_ = timeit(F"{func}(grid=grid)" , setup=__a , number=500 ) print(F"{func}() took {time:0.4f} seconds" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
22
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
22
1
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) lowerCamelCase__ = logging.getLogger(__name__) def A(__a: str ): lowerCAmelCase_ = git.Repo(search_parent_directories=__a ) lowerCAmelCase_ = { "repo_id": str(__a ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), } with open(os.path.join(__a , "git_log.json" ) , "w" ) as f: json.dump(__a , __a , indent=4 ) def A(__a: Tuple ): if params.n_gpu <= 0: lowerCAmelCase_ = 0 lowerCAmelCase_ = -1 lowerCAmelCase_ = True lowerCAmelCase_ = False return assert torch.cuda.is_available() logger.info("Initializing GPUs" ) if params.n_gpu > 1: assert params.local_rank != -1 lowerCAmelCase_ = int(os.environ["WORLD_SIZE"] ) lowerCAmelCase_ = int(os.environ["N_GPU_NODE"] ) lowerCAmelCase_ = int(os.environ["RANK"] ) # number of nodes / node ID lowerCAmelCase_ = params.world_size // params.n_gpu_per_node lowerCAmelCase_ = params.global_rank // params.n_gpu_per_node lowerCAmelCase_ = True assert params.n_nodes == int(os.environ["N_NODES"] ) assert params.node_id == int(os.environ["NODE_RANK"] ) # local job (single GPU) else: assert params.local_rank == -1 lowerCAmelCase_ = 1 lowerCAmelCase_ = 0 lowerCAmelCase_ = 0 lowerCAmelCase_ = 0 lowerCAmelCase_ = 1 lowerCAmelCase_ = 1 lowerCAmelCase_ = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowerCAmelCase_ = params.node_id == 0 and params.local_rank == 0 lowerCAmelCase_ = params.n_nodes > 1 # summary lowerCAmelCase_ = F"--- Global rank: {params.global_rank} - " logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes ) logger.info(PREFIX + "Node ID : %i" % params.node_id ) logger.info(PREFIX + "Local rank : %i" % params.local_rank ) logger.info(PREFIX + "World size : %i" % params.world_size ) logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node ) logger.info(PREFIX + "Master : %s" % str(params.is_master ) ) logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) ) logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) ) logger.info(PREFIX + "Hostname : %s" % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info("Initializing PyTorch distributed" ) torch.distributed.init_process_group( init_method="env://" , backend="nccl" , ) def A(__a: List[str] ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
22
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Tuple: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a ) if visual_prompt is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if images is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if visual_prompt is not None and images is not None: lowerCAmelCase_ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase_ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
22
1
import colorsys from PIL import Image # type: ignore def A(__a: float , __a: float , __a: int ): lowerCAmelCase_ = x lowerCAmelCase_ = y for step in range(__a ): # noqa: B007 lowerCAmelCase_ = a * a - b * b + x lowerCAmelCase_ = 2 * a * b + y lowerCAmelCase_ = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def A(__a: float ): if distance == 1: return (0, 0, 0) else: return (255, 255, 255) def A(__a: float ): if distance == 1: return (0, 0, 0) else: return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(__a , 1 , 1 ) ) def A(__a: int = 800 , __a: int = 600 , __a: float = -0.6 , __a: float = 0 , __a: float = 3.2 , __a: int = 50 , __a: bool = True , ): lowerCAmelCase_ = Image.new("RGB" , (image_width, image_height) ) lowerCAmelCase_ = img.load() # loop through the image-coordinates for image_x in range(__a ): for image_y in range(__a ): # determine the figure-coordinates based on the image-coordinates lowerCAmelCase_ = figure_width / image_width * image_height lowerCAmelCase_ = figure_center_x + (image_x / image_width - 0.5) * figure_width lowerCAmelCase_ = figure_center_y + (image_y / image_height - 0.5) * figure_height lowerCAmelCase_ = get_distance(__a , __a , __a ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: lowerCAmelCase_ = get_color_coded_rgb(__a ) else: lowerCAmelCase_ = get_black_and_white_rgb(__a ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure lowerCamelCase__ = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
22
import datasets lowerCamelCase__ = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' lowerCamelCase__ = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' lowerCamelCase__ = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def A(__a: Dict , __a: Union[str, Any] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def __a ( self , _a , _a ) -> List[str]: return {"accuracy": simple_accuracy(_a , _a )}
22
1
from itertools import permutations def A(__a: tuple ): if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False lowerCAmelCase_ = [7, 11, 13, 17] for i, test in enumerate(__a ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def A(__a: int = 10 ): return sum( int("".join(map(__a , __a ) ) ) for num in permutations(range(__a ) ) if is_substring_divisible(__a ) ) if __name__ == "__main__": print(F'''{solution() = }''')
22
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
22
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) class __magic_name__ (__lowercase ): lowerCamelCase__ = '''timm_backbone''' def __init__( self , _a=None , _a=3 , _a=True , _a=True , _a=None , **_a , ) -> Tuple: super().__init__(**_a ) lowerCAmelCase_ = backbone lowerCAmelCase_ = num_channels lowerCAmelCase_ = features_only lowerCAmelCase_ = use_pretrained_backbone lowerCAmelCase_ = True lowerCAmelCase_ = out_indices if out_indices is not None else (-1,)
22
def A(__a: Optional[Any] ): lowerCAmelCase_ = len(__a ) lowerCAmelCase_ = sum(__a ) lowerCAmelCase_ = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): lowerCAmelCase_ = True for i in range(1 , s + 1 ): lowerCAmelCase_ = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): lowerCAmelCase_ = dp[i][j - 1] if arr[i - 1] <= j: lowerCAmelCase_ = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: lowerCAmelCase_ = s - 2 * j break return diff
22
1
import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def __a ( self , _a=0 ) -> int: lowerCAmelCase_ = np.random.RandomState(_a ) lowerCAmelCase_ = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def __a ( self ) -> str: lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inputs() lowerCAmelCase_ = pipe(**_a ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase_ = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_a ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inputs() lowerCAmelCase_ = pipe(**_a ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inputs() lowerCAmelCase_ = pipe(**_a ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ) -> Any: lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inputs() lowerCAmelCase_ = pipe(**_a ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ) -> List[Any]: lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inputs() lowerCAmelCase_ = pipe(**_a ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ) -> int: lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) lowerCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inputs() lowerCAmelCase_ = pipe(**_a ).images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) lowerCAmelCase_ = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __a ( self ) -> Tuple: lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inputs() lowerCAmelCase_ = 3 * [inputs["prompt"]] # forward lowerCAmelCase_ = pipe(**_a ) lowerCAmelCase_ = output.images[0, -3:, -3:, -1] lowerCAmelCase_ = self.get_dummy_inputs() lowerCAmelCase_ = 3 * [inputs.pop("prompt" )] lowerCAmelCase_ = pipe.tokenizer( _a , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_a , return_tensors="np" , ) lowerCAmelCase_ = text_inputs["input_ids"] lowerCAmelCase_ = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] lowerCAmelCase_ = prompt_embeds # forward lowerCAmelCase_ = pipe(**_a ) lowerCAmelCase_ = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 def __a ( self ) -> int: lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = self.get_dummy_inputs() lowerCAmelCase_ = 3 * ["this is a negative prompt"] lowerCAmelCase_ = negative_prompt lowerCAmelCase_ = 3 * [inputs["prompt"]] # forward lowerCAmelCase_ = pipe(**_a ) lowerCAmelCase_ = output.images[0, -3:, -3:, -1] lowerCAmelCase_ = self.get_dummy_inputs() lowerCAmelCase_ = 3 * [inputs.pop("prompt" )] lowerCAmelCase_ = [] for p in [prompt, negative_prompt]: lowerCAmelCase_ = pipe.tokenizer( _a , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_a , return_tensors="np" , ) lowerCAmelCase_ = text_inputs["input_ids"] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] ) lowerCAmelCase_ , lowerCAmelCase_ = embeds # forward lowerCAmelCase_ = pipe(**_a ) lowerCAmelCase_ = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4 @nightly @require_onnxruntime @require_torch_gpu class __magic_name__ (unittest.TestCase ): @property def __a ( self ) -> List[str]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __a ( self ) -> Dict: lowerCAmelCase_ = ort.SessionOptions() lowerCAmelCase_ = False return options def __a ( self ) -> Optional[int]: # using the PNDM scheduler by default lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = "A painting of a squirrel eating a burger" np.random.seed(0 ) lowerCAmelCase_ = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" ) lowerCAmelCase_ = output.images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Optional[int]: lowerCAmelCase_ = DDIMScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = "open neural network exchange" lowerCAmelCase_ = np.random.RandomState(0 ) lowerCAmelCase_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type="np" ) lowerCAmelCase_ = output.images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> List[str]: lowerCAmelCase_ = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" ) lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) sd_pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = "open neural network exchange" lowerCAmelCase_ = np.random.RandomState(0 ) lowerCAmelCase_ = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type="np" ) lowerCAmelCase_ = output.images lowerCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase_ = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = 0 def test_callback_fn(_a , _a , _a ) -> None: lowerCAmelCase_ = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) lowerCAmelCase_ = latents[0, -3:, -3:, -1] lowerCAmelCase_ = np.array( [-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) lowerCAmelCase_ = latents[0, -3:, -3:, -1] lowerCAmelCase_ = np.array( [-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3 lowerCAmelCase_ = False lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_a ) lowerCAmelCase_ = "Andromeda galaxy in a bottle" lowerCAmelCase_ = np.random.RandomState(0 ) pipe( prompt=_a , num_inference_steps=5 , guidance_scale=7.5 , generator=_a , callback=_a , callback_steps=1 , ) assert test_callback_fn.has_been_called assert number_of_steps == 6 def __a ( self ) -> Optional[int]: lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) assert isinstance(_a , _a ) assert pipe.safety_checker is None lowerCAmelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_a ) lowerCAmelCase_ = OnnxStableDiffusionPipeline.from_pretrained(_a ) # sanity check that the pipeline still works assert pipe.safety_checker is None lowerCAmelCase_ = pipe("example prompt" , num_inference_steps=2 ).images[0] assert image is not None
22
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
22
1
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {'''vocab_file''': '''vocab.json'''} lowerCamelCase__ = { '''vocab_file''': { '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''', } } lowerCamelCase__ = {'''mgp-str''': 27} class __magic_name__ (__lowercase ): lowerCamelCase__ = VOCAB_FILES_NAMES lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , _a , _a="[GO]" , _a="[GO]" , _a="[s]" , _a="[GO]" , **_a ) -> List[str]: super().__init__( unk_token=_a , bos_token=_a , eos_token=_a , pad_token=_a , **_a , ) with open(_a , encoding="utf-8" ) as vocab_handle: lowerCAmelCase_ = json.load(_a ) lowerCAmelCase_ = {v: k for k, v in self.vocab.items()} @property def __a ( self ) -> Union[str, Any]: return len(self.vocab ) def __a ( self ) -> str: return dict(self.vocab , **self.added_tokens_encoder ) def __a ( self , _a ) -> int: lowerCAmelCase_ = [] for s in text: char_tokens.extend(_a ) return char_tokens def __a ( self , _a ) -> Optional[Any]: return self.vocab.get(_a , self.vocab.get(self.unk_token ) ) def __a ( self , _a ) -> List[Any]: return self.decoder.get(_a ) def __a ( self , _a , _a = None ) -> Tuple[str]: if not os.path.isdir(_a ): logger.error("Vocabulary path ({}) should be a directory".format(_a ) ) return lowerCAmelCase_ = os.path.join( _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(_a , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_a , ensure_ascii=_a ) + "\n" ) return (vocab_file,)
22
import re from filelock import FileLock try: import nltk lowerCamelCase__ = True except (ImportError, ModuleNotFoundError): lowerCamelCase__ = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def A(__a: str ): re.sub("<n>" , "" , __a ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__a ) )
22
1
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = ReformerTokenizer lowerCamelCase__ = ReformerTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = True def __a ( self ) -> List[Any]: super().setUp() lowerCAmelCase_ = ReformerTokenizer(_a , keep_accents=_a ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self ) -> int: lowerCAmelCase_ = "<s>" lowerCAmelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a ) def __a ( self ) -> int: lowerCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(_a ) , 1000 ) def __a ( self ) -> Union[str, Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def __a ( self ) -> Dict: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "I was born in 92000, and this is falsé." lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def __a ( self , _a=15 ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) # Simple input lowerCAmelCase_ = "This is a simple input" lowerCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"] lowerCAmelCase_ = ("This is a simple input", "This is a pair") lowerCAmelCase_ = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="max_length" ) # Simple input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="max_length" ) # Simple input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="max_length" , ) # Pair input self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="max_length" ) # Pair input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="max_length" ) # Pair input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="max_length" , ) def __a ( self ) -> str: pass def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = ReformerTokenizer(_a , keep_accents=_a ) lowerCAmelCase_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_a ) , [285, 46, 10, 170, 382] , ) lowerCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( _a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_a ) self.assertListEqual( _a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(_a ) self.assertListEqual( _a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def __a ( self ) -> str: return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" ) @slow def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = "Hello World!" lowerCAmelCase_ = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(_a , self.big_tokenizer.encode(_a ) ) @slow def __a ( self ) -> str: lowerCAmelCase_ = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) lowerCAmelCase_ = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(_a , self.big_tokenizer.encode(_a ) ) @require_torch @slow def __a ( self ) -> Tuple: import torch from transformers import ReformerConfig, ReformerModel # Build sequence lowerCAmelCase_ = list(self.big_tokenizer.get_vocab().keys() )[:10] lowerCAmelCase_ = " ".join(_a ) lowerCAmelCase_ = self.big_tokenizer.encode_plus(_a , return_tensors="pt" ) lowerCAmelCase_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" ) lowerCAmelCase_ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) lowerCAmelCase_ = encoded_sequence["input_ids"].shape lowerCAmelCase_ = ReformerModel(_a ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_a ) model(**_a ) @slow def __a ( self ) -> List[str]: # fmt: off lowerCAmelCase_ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 lowerCAmelCase_ = [ "This is a very simple sentence.", "The quick brown fox jumps over the lazy dog.", ] self.tokenizer_integration_test_util( expected_encoding=_a , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=_a , sequences=_a , )
22
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
from math import factorial def A(__a: int , __a: int , __a: float ): if successes > trials: raise ValueError("successes must be lower or equal to trials" ) if trials < 0 or successes < 0: raise ValueError("the function is defined for non-negative integers" ) if not isinstance(__a , __a ) or not isinstance(__a , __a ): raise ValueError("the function is defined for non-negative integers" ) if not 0 < prob < 1: raise ValueError("prob has to be in range of 1 - 0" ) lowerCAmelCase_ = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! lowerCAmelCase_ = float(factorial(__a ) ) coefficient /= factorial(__a ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('''Probability of 2 successes out of 4 trails''') print('''with probability of 0.75 is:''', end=''' ''') print(binomial_distribution(2, 4, 0.75))
22
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bertabs''' def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_pos lowerCAmelCase_ = enc_layers lowerCAmelCase_ = enc_hidden_size lowerCAmelCase_ = enc_heads lowerCAmelCase_ = enc_ff_size lowerCAmelCase_ = enc_dropout lowerCAmelCase_ = dec_layers lowerCAmelCase_ = dec_hidden_size lowerCAmelCase_ = dec_heads lowerCAmelCase_ = dec_ff_size lowerCAmelCase_ = dec_dropout
22
1
import flax.linen as nn import jax import jax.numpy as jnp class __magic_name__ (nn.Module ): lowerCamelCase__ = 42 lowerCamelCase__ = jnp.floataa def __a ( self ) -> Any: lowerCAmelCase_ = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _a ) -> Optional[Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = hidden_states.shape lowerCAmelCase_ = jax.image.resize( _a , shape=(batch, height * 2, width * 2, channels) , method="nearest" , ) lowerCAmelCase_ = self.conv(_a ) return hidden_states class __magic_name__ (nn.Module ): lowerCamelCase__ = 42 lowerCamelCase__ = jnp.floataa def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _a ) -> List[str]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) lowerCAmelCase_ = self.conv(_a ) return hidden_states class __magic_name__ (nn.Module ): lowerCamelCase__ = 42 lowerCamelCase__ = None lowerCamelCase__ = 0.0 lowerCamelCase__ = None lowerCamelCase__ = jnp.floataa def __a ( self ) -> Optional[int]: lowerCAmelCase_ = self.in_channels if self.out_channels is None else self.out_channels lowerCAmelCase_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) lowerCAmelCase_ = nn.Conv( _a , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) lowerCAmelCase_ = nn.Dense(_a , dtype=self.dtype ) lowerCAmelCase_ = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) lowerCAmelCase_ = nn.Dropout(self.dropout_prob ) lowerCAmelCase_ = nn.Conv( _a , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) lowerCAmelCase_ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut lowerCAmelCase_ = None if use_nin_shortcut: lowerCAmelCase_ = nn.Conv( _a , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , ) def __call__( self , _a , _a , _a=True ) -> Tuple: lowerCAmelCase_ = hidden_states lowerCAmelCase_ = self.norma(_a ) lowerCAmelCase_ = nn.swish(_a ) lowerCAmelCase_ = self.conva(_a ) lowerCAmelCase_ = self.time_emb_proj(nn.swish(_a ) ) lowerCAmelCase_ = jnp.expand_dims(jnp.expand_dims(_a , 1 ) , 1 ) lowerCAmelCase_ = hidden_states + temb lowerCAmelCase_ = self.norma(_a ) lowerCAmelCase_ = nn.swish(_a ) lowerCAmelCase_ = self.dropout(_a , _a ) lowerCAmelCase_ = self.conva(_a ) if self.conv_shortcut is not None: lowerCAmelCase_ = self.conv_shortcut(_a ) return hidden_states + residual
22
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
22
1
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowerCamelCase__ = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ '''text-classification''', '''language-modeling''', '''summarization''', '''token-classification''', '''question-answering''', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowerCamelCase__ = logging.getLogger() def A(): lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument("-f" ) lowerCAmelCase_ = parser.parse_args() return args.f def A(__a: List[str] , __a: Optional[Any]="eval" ): lowerCAmelCase_ = os.path.join(__a , F"{split}_results.json" ) if os.path.exists(__a ): with open(__a , "r" ) as f: return json.load(__a ) raise ValueError(F"can't find {path}" ) lowerCamelCase__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __magic_name__ (__lowercase ): def __a ( self ) -> List[str]: lowerCAmelCase_ = self.get_auto_remove_tmp_dir() lowerCAmelCase_ = f"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(_a , "argv" , _a ): run_flax_glue.main() lowerCAmelCase_ = get_results(_a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 ) @slow def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.get_auto_remove_tmp_dir() lowerCAmelCase_ = f"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(_a , "argv" , _a ): run_clm_flax.main() lowerCAmelCase_ = get_results(_a ) self.assertLess(result["eval_perplexity"] , 100 ) @slow def __a ( self ) -> Dict: lowerCAmelCase_ = self.get_auto_remove_tmp_dir() lowerCAmelCase_ = f"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(_a , "argv" , _a ): run_summarization_flax.main() lowerCAmelCase_ = get_results(_a , split="test" ) self.assertGreaterEqual(result["test_rouge1"] , 10 ) self.assertGreaterEqual(result["test_rouge2"] , 2 ) self.assertGreaterEqual(result["test_rougeL"] , 7 ) self.assertGreaterEqual(result["test_rougeLsum"] , 7 ) @slow def __a ( self ) -> Optional[int]: lowerCAmelCase_ = self.get_auto_remove_tmp_dir() lowerCAmelCase_ = f"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(_a , "argv" , _a ): run_mlm_flax.main() lowerCAmelCase_ = get_results(_a ) self.assertLess(result["eval_perplexity"] , 42 ) @slow def __a ( self ) -> str: lowerCAmelCase_ = self.get_auto_remove_tmp_dir() lowerCAmelCase_ = f"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(_a , "argv" , _a ): run_ta_mlm_flax.main() lowerCAmelCase_ = get_results(_a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.4_2 ) @slow def __a ( self ) -> List[Any]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu lowerCAmelCase_ = 7 if get_gpu_count() > 1 else 2 lowerCAmelCase_ = self.get_auto_remove_tmp_dir() lowerCAmelCase_ = f"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(_a , "argv" , _a ): run_flax_ner.main() lowerCAmelCase_ = get_results(_a ) self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 ) self.assertGreaterEqual(result["eval_f1"] , 0.3 ) @slow def __a ( self ) -> str: lowerCAmelCase_ = self.get_auto_remove_tmp_dir() lowerCAmelCase_ = f"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(_a , "argv" , _a ): run_qa.main() lowerCAmelCase_ = get_results(_a ) self.assertGreaterEqual(result["eval_f1"] , 30 ) self.assertGreaterEqual(result["eval_exact"] , 30 )
22
def A(): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] lowerCamelCase__ = generate_large_matrix() lowerCamelCase__ = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A(__a: list[list[int]] ): assert all(row == sorted(__a , reverse=__a ) for row in grid ) assert all(list(__a ) == sorted(__a , reverse=__a ) for col in zip(*__a ) ) def A(__a: list[int] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(__a ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCAmelCase_ = (left + right) // 2 lowerCAmelCase_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCAmelCase_ = mid + 1 else: lowerCAmelCase_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__a ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(grid[0] ) for i in range(len(__a ) ): lowerCAmelCase_ = find_negative_index(grid[i][:bound] ) total += bound return (len(__a ) * len(grid[0] )) - total def A(__a: list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 for row in grid: for i, number in enumerate(__a ): if number < 0: total += len(__a ) - i break return total def A(): from timeit import timeit print("Running benchmarks" ) lowerCAmelCase_ = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCAmelCase_ = timeit(F"{func}(grid=grid)" , setup=__a , number=500 ) print(F"{func}() took {time:0.4f} seconds" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
22
1
def A(__a: str , __a: List[str] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(__a ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None lowerCAmelCase_ = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(__a ): return None lowerCAmelCase_ = sorted_collection[point] if current_item == item: return point else: if point < left: lowerCAmelCase_ = left lowerCAmelCase_ = point elif point > right: lowerCAmelCase_ = right lowerCAmelCase_ = point else: if item < current_item: lowerCAmelCase_ = point - 1 else: lowerCAmelCase_ = point + 1 return None def A(__a: Optional[int] , __a: List[Any] , __a: str , __a: Optional[Any] ): # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None lowerCAmelCase_ = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(__a ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(__a , __a , __a , __a ) elif point > right: return interpolation_search_by_recursion(__a , __a , __a , __a ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( __a , __a , __a , point - 1 ) else: return interpolation_search_by_recursion( __a , __a , point + 1 , __a ) def A(__a: Tuple ): if collection != sorted(__a ): raise ValueError("Collection must be ascending sorted" ) return True if __name__ == "__main__": import sys lowerCamelCase__ = 0 if debug == 1: lowerCamelCase__ = [10, 30, 40, 45, 50, 66, 77, 93] try: __assert_sorted(collection) except ValueError: sys.exit('''Sequence must be ascending sorted to apply interpolation search''') lowerCamelCase__ = 67 lowerCamelCase__ = interpolation_search(collection, target) if result is not None: print(F'''{target} found at positions: {result}''') else: print('''Not found''')
22
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
22
1
def A(__a: int = 100_0000 ): lowerCAmelCase_ = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , __a ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
22
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def A(__a: str ): lowerCAmelCase_ , lowerCAmelCase_ = analyze_text(__a ) lowerCAmelCase_ = list(" " + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase_ = sum(single_char_strings.values() ) # one length string lowerCAmelCase_ = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase_ = single_char_strings[ch] lowerCAmelCase_ = my_str / all_sum my_fir_sum += prob * math.loga(__a ) # entropy formula. # print entropy print(F"{round(-1 * my_fir_sum ):.1f}" ) # two len string lowerCAmelCase_ = sum(two_char_strings.values() ) lowerCAmelCase_ = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase_ = cha + cha if sequence in two_char_strings: lowerCAmelCase_ = two_char_strings[sequence] lowerCAmelCase_ = int(__a ) / all_sum my_sec_sum += prob * math.loga(__a ) # print second entropy print(F"{round(-1 * my_sec_sum ):.1f}" ) # print the difference between them print(F"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" ) def A(__a: str ): lowerCAmelCase_ = Counter() # type: ignore lowerCAmelCase_ = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(__a ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def A(): import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
22
import math def A(__a: int ): return math.sqrt(__a ) * math.sqrt(__a ) == num def A(__a: int ): lowerCAmelCase_ = 0 lowerCAmelCase_ = n while left <= right: lowerCAmelCase_ = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: lowerCAmelCase_ = mid - 1 else: lowerCAmelCase_ = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
22
1
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bertabs''' def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_pos lowerCAmelCase_ = enc_layers lowerCAmelCase_ = enc_hidden_size lowerCAmelCase_ = enc_heads lowerCAmelCase_ = enc_ff_size lowerCAmelCase_ = enc_dropout lowerCAmelCase_ = dec_layers lowerCAmelCase_ = dec_hidden_size lowerCAmelCase_ = dec_heads lowerCAmelCase_ = dec_ff_size lowerCAmelCase_ = dec_dropout
22
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def A(__a: Dict , __a: List[str]=None ): require_version(deps[pkg] , __a )
22
1
from itertools import product def A(__a: int , __a: int ): lowerCAmelCase_ = sides_number lowerCAmelCase_ = max_face_number * dice_number lowerCAmelCase_ = [0] * (max_total + 1) lowerCAmelCase_ = 1 lowerCAmelCase_ = range(__a , max_face_number + 1 ) for dice_numbers in product(__a , repeat=__a ): lowerCAmelCase_ = sum(__a ) totals_frequencies[total] += 1 return totals_frequencies def A(): lowerCAmelCase_ = total_frequency_distribution( sides_number=4 , dice_number=9 ) lowerCAmelCase_ = total_frequency_distribution( sides_number=6 , dice_number=6 ) lowerCAmelCase_ = 0 lowerCAmelCase_ = 9 lowerCAmelCase_ = 4 * 9 lowerCAmelCase_ = 6 for peter_total in range(__a , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) lowerCAmelCase_ = (4**9) * (6**6) lowerCAmelCase_ = peter_wins_count / total_games_number lowerCAmelCase_ = round(__a , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F'''{solution() = }''')
22
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = ''' Hello world! cécé herlolip''' lowerCamelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def A(__a: Any ): lowerCAmelCase_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ): lowerCAmelCase_ = dct.pop(__a ) lowerCAmelCase_ = val def A(__a: Tuple ): lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def A(__a: List[str] ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a ) lowerCAmelCase_ = emb.weight.data return lin_layer @torch.no_grad() def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ): if not os.path.exists(__a ): lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval() else: lowerCAmelCase_ = load_xsum_checkpoint(__a ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCAmelCase_ = checkpoint_path.replace("." , "-" ) lowerCAmelCase_ = BartConfig.from_pretrained(__a ) lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 ) lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(__a , __a ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": lowerCAmelCase_ = bart.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(__a , __a , __a ) lowerCAmelCase_ = BartForSequenceClassification(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a ) lowerCAmelCase_ = model(__a )[0] # logits else: # no classification heads to worry about lowerCAmelCase_ = bart.model.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"] lowerCAmelCase_ = bart.extract_features(__a ) if hf_checkpoint_name == "facebook/bart-large": lowerCAmelCase_ = BartModel(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = model(__a ).model[0] else: lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt model.model.load_state_dict(__a ) if hasattr(__a , "lm_head" ): lowerCAmelCase_ = make_linear_from_emb(model.model.shared ) lowerCAmelCase_ = model.model(__a )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
22
1
from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class __magic_name__ : lowerCamelCase__ = 42 lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = namedtuple('''CoinsDistribResult''', '''moves excess''') def A(__a: TreeNode | None ): if root is None: return 0 # Validation def count_nodes(__a: TreeNode | None ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(__a: TreeNode | None ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(__a ) != count_coins(__a ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(__a: TreeNode | None ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) lowerCAmelCase_ , lowerCAmelCase_ = get_distrib(node.left ) lowerCAmelCase_ , lowerCAmelCase_ = get_distrib(node.right ) lowerCAmelCase_ = 1 - left_distrib_excess lowerCAmelCase_ = 1 - right_distrib_excess lowerCAmelCase_ = ( left_distrib_moves + right_distrib_moves + abs(__a ) + abs(__a ) ) lowerCAmelCase_ = node.data - coins_to_left - coins_to_right return CoinsDistribResult(__a , __a ) return get_distrib(__a )[0] if __name__ == "__main__": import doctest doctest.testmod()
22
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = MobileBertTokenizer lowerCamelCase__ = MobileBertTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = filter_non_english lowerCamelCase__ = '''google/mobilebert-uncased''' def __a ( self ) -> Optional[Any]: super().setUp() lowerCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) lowerCAmelCase_ = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def __a ( self , _a ) -> Any: lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = "unwanted, running" return input_text, output_text def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] ) def __a ( self ) -> Tuple: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # With lower casing lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __a ( self ) -> Dict: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> List[str]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __a ( self ) -> Any: lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCAmelCase_ = {} for i, token in enumerate(_a ): lowerCAmelCase_ = i lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def __a ( self ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __a ( self ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __a ( self ) -> Dict: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def __a ( self ) -> Any: lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __a ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." lowerCAmelCase_ = tokenizer_r.encode_plus( _a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , ) lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False lowerCAmelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = ["的", "人", "有"] lowerCAmelCase_ = "".join(_a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = False lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase_ = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(_a ) ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a )
22
1
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCamelCase__ = 16 lowerCamelCase__ = 32 def A(__a: Accelerator , __a: int = 16 ): lowerCAmelCase_ = AutoTokenizer.from_pretrained("bert-base-cased" ) lowerCAmelCase_ = load_dataset("glue" , "mrpc" ) def tokenize_function(__a: Dict ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__a , max_length=__a ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase_ = datasets.map( __a , batched=__a , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(__a: Any ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase_ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase_ = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase_ = 8 else: lowerCAmelCase_ = None return tokenizer.pad( __a , padding="longest" , max_length=__a , pad_to_multiple_of=__a , return_tensors="pt" , ) # Instantiate dataloaders. lowerCAmelCase_ = DataLoader( tokenized_datasets["train"] , shuffle=__a , collate_fn=__a , batch_size=__a ) lowerCAmelCase_ = DataLoader( tokenized_datasets["validation"] , shuffle=__a , collate_fn=__a , batch_size=__a ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCamelCase__ = mocked_dataloaders # noqa: F811 def A(__a: List[Any] , __a: List[str] ): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , __a ) == "1": lowerCAmelCase_ = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: lowerCAmelCase_ = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir ) else: lowerCAmelCase_ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ = config["lr"] lowerCAmelCase_ = int(config["num_epochs"] ) lowerCAmelCase_ = int(config["seed"] ) lowerCAmelCase_ = int(config["batch_size"] ) set_seed(__a ) lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(__a , __a ) lowerCAmelCase_ = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation lowerCAmelCase_ = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCAmelCase_ = batch_size // MAX_GPU_BATCH_SIZE lowerCAmelCase_ = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=__a ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase_ = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase_ = AdamW(params=model.parameters() , lr=__a ) # Instantiate scheduler lowerCAmelCase_ = get_linear_schedule_with_warmup( optimizer=__a , num_warmup_steps=100 , num_training_steps=(len(__a ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare( __a , __a , __a , __a , __a ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: lowerCAmelCase_ = os.path.split(__a )[-1].split("." )[0] accelerator.init_trackers(__a , __a ) # Now we train the model for epoch in range(__a ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: lowerCAmelCase_ = 0 for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowerCAmelCase_ = model(**__a ) lowerCAmelCase_ = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() lowerCAmelCase_ = loss / gradient_accumulation_steps accelerator.backward(__a ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ = model(**__a ) lowerCAmelCase_ = outputs.logits.argmax(dim=-1 ) lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=__a , references=__a , ) lowerCAmelCase_ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"epoch {epoch}:" , __a ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(__a ), "epoch": epoch, } , step=__a , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def A(): lowerCAmelCase_ = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=__a , default=__a , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) parser.add_argument( "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , ) parser.add_argument( "--project_dir" , type=__a , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , ) lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(__a , __a ) if __name__ == "__main__": main()
22
import math from collections.abc import Iterator from itertools import takewhile def A(__a: int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A(): lowerCAmelCase_ = 2 while True: if is_prime(__a ): yield num num += 1 def A(__a: int = 200_0000 ): return sum(takewhile(lambda __a : x < n , prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
22
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''cvt''' def __init__( self , _a=3 , _a=[7, 3, 3] , _a=[4, 2, 2] , _a=[2, 1, 1] , _a=[64, 192, 384] , _a=[1, 3, 6] , _a=[1, 2, 10] , _a=[4.0, 4.0, 4.0] , _a=[0.0, 0.0, 0.0] , _a=[0.0, 0.0, 0.0] , _a=[0.0, 0.0, 0.1] , _a=[True, True, True] , _a=[False, False, True] , _a=["dw_bn", "dw_bn", "dw_bn"] , _a=[3, 3, 3] , _a=[1, 1, 1] , _a=[2, 2, 2] , _a=[1, 1, 1] , _a=[1, 1, 1] , _a=0.0_2 , _a=1E-12 , **_a , ) -> Union[str, Any]: super().__init__(**_a ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = patch_sizes lowerCAmelCase_ = patch_stride lowerCAmelCase_ = patch_padding lowerCAmelCase_ = embed_dim lowerCAmelCase_ = num_heads lowerCAmelCase_ = depth lowerCAmelCase_ = mlp_ratio lowerCAmelCase_ = attention_drop_rate lowerCAmelCase_ = drop_rate lowerCAmelCase_ = drop_path_rate lowerCAmelCase_ = qkv_bias lowerCAmelCase_ = cls_token lowerCAmelCase_ = qkv_projection_method lowerCAmelCase_ = kernel_qkv lowerCAmelCase_ = padding_kv lowerCAmelCase_ = stride_kv lowerCAmelCase_ = padding_q lowerCAmelCase_ = stride_q lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps
22
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''mobilenet_v2''' def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict: super().__init__(**_a ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = depth_multiplier lowerCAmelCase_ = depth_divisible_by lowerCAmelCase_ = min_depth lowerCAmelCase_ = expand_ratio lowerCAmelCase_ = output_stride lowerCAmelCase_ = first_layer_is_expansion lowerCAmelCase_ = finegrained_output lowerCAmelCase_ = hidden_act lowerCAmelCase_ = tf_padding lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = semantic_loss_ignore_index class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.11''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self ) -> float: return 1E-4
22
1
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() lowerCamelCase__ = 2 class __magic_name__ : def __init__( self , *, # begin keyword-only arguments _a="<s>" , _a="<pad>" , _a="</s>" , _a="<unk>" , _a=None , ) -> int: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = bos, unk, pad, eos lowerCAmelCase_ = [] lowerCAmelCase_ = [] lowerCAmelCase_ = {} lowerCAmelCase_ = self.add_symbol(_a ) lowerCAmelCase_ = self.add_symbol(_a ) lowerCAmelCase_ = self.add_symbol(_a ) lowerCAmelCase_ = self.add_symbol(_a ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(_a ) lowerCAmelCase_ = len(self.symbols ) def __eq__( self , _a ) -> Dict: return self.indices == other.indices def __getitem__( self , _a ) -> List[Any]: if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ) -> List[str]: return len(self.symbols ) def __contains__( self , _a ) -> str: return sym in self.indices @classmethod def __a ( cls , _a ) -> List[str]: lowerCAmelCase_ = cls() d.add_from_file(_a ) return d def __a ( self , _a , _a=1 , _a=False ) -> List[Any]: if word in self.indices and not overwrite: lowerCAmelCase_ = self.indices[word] lowerCAmelCase_ = self.count[idx] + n return idx else: lowerCAmelCase_ = len(self.symbols ) lowerCAmelCase_ = idx self.symbols.append(_a ) self.count.append(_a ) return idx def __a ( self , _a ) -> str: return 0 def __a ( self , _a ) -> Optional[int]: if isinstance(_a , _a ): try: with open(_a , "r" , encoding="utf-8" ) as fd: self.add_from_file(_a ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(_a ) ) return lowerCAmelCase_ = f.readlines() lowerCAmelCase_ = self._load_meta(_a ) for line in lines[indices_start_line:]: try: lowerCAmelCase_ , lowerCAmelCase_ = line.rstrip().rsplit(" " , 1 ) if field == "#fairseq:overwrite": lowerCAmelCase_ = True lowerCAmelCase_ , lowerCAmelCase_ = line.rsplit(" " , 1 ) else: lowerCAmelCase_ = False lowerCAmelCase_ = int(_a ) lowerCAmelCase_ = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. If using the Camembert model, please " "download an updated copy of the model file.".format(_a ) ) self.add_symbol(_a , n=_a , overwrite=_a ) except ValueError: raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def A(__a: List[Any] ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} lowerCAmelCase_ = dict((re.sub(r"@@$" , "" , __a ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , __a ), v) for k, v in d.items() ) lowerCAmelCase_ = "<s> <pad> </s> <unk>".split() # restore the special tokens for k in keep_keys: del da[F"{k}</w>"] lowerCAmelCase_ = d[k] # restore return da def A(__a: Optional[Any] , __a: Dict ): # prep if not os.path.exists(__a ): raise ValueError(F"path {biogpt_checkpoint_path} does not exist!" ) os.makedirs(__a , exist_ok=__a ) print(F"Writing results to {pytorch_dump_folder_path}" ) # handle various types of models lowerCAmelCase_ = os.path.join(__a , "checkpoint.pt" ) if not os.path.isfile(__a ): raise ValueError(F"path to the file {checkpoint_file} does not exist!" ) lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = chkpt["cfg"]["model"] # dicts lowerCAmelCase_ = os.path.join(__a , "dict.txt" ) if not os.path.isfile(__a ): raise ValueError(F"path to the file {dict_file} does not exist!" ) lowerCAmelCase_ = Dictionary.load(__a ) lowerCAmelCase_ = rewrite_dict_keys(src_dict.indices ) lowerCAmelCase_ = len(__a ) lowerCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] ) print(F"Generating {src_vocab_file} of {src_vocab_size} records" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__a , ensure_ascii=__a , indent=__a ) ) # merges_file (bpecodes) lowerCAmelCase_ = os.path.join(__a , "bpecodes" ) if not os.path.isfile(__a ): raise ValueError(F"path to the file {bpecodes_file} does not exist!" ) lowerCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["merges_file"] ) shutil.copyfile(__a , __a ) # model config lowerCAmelCase_ = os.path.join(__a , "config.json" ) lowerCAmelCase_ = { "activation_dropout": args["activation_dropout"], "architectures": ["BioGptForCausalLM"], "attention_probs_dropout_prob": args["attention_dropout"], "bos_token_id": 0, "eos_token_id": 2, "hidden_act": args["activation_fn"], "hidden_dropout_prob": args["dropout"], "hidden_size": args["decoder_embed_dim"], "initializer_range": 0.02, "intermediate_size": args["decoder_ffn_embed_dim"], "layer_norm_eps": 1E-12, "layerdrop": args["decoder_layerdrop"], "max_position_embeddings": args["max_target_positions"], "model_type": "biogpt", "num_attention_heads": args["decoder_attention_heads"], "num_hidden_layers": args["decoder_layers"], "pad_token_id": 1, "scale_embedding": not args["no_scale_embedding"], "tie_word_embeddings": args["share_decoder_input_output_embed"], "vocab_size": src_vocab_size, } # good hparam defaults to start with print(F"Generating {biogpt_model_config_file}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__a , ensure_ascii=__a , indent=__a ) ) # tokenizer config lowerCAmelCase_ = os.path.join(__a , __a ) lowerCAmelCase_ = { "bos_token": "<s>", "eos_token": "</s>", "model_max_length": 1024, "pad_token": "<pad>", "special_tokens_map_file": None, "tokenizer_class": "BioGptTokenizer", "unk_token": "<unk>", } print(F"Generating {biogpt_tokenizer_config_file}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(json.dumps(__a , ensure_ascii=__a , indent=__a ) ) # model lowerCAmelCase_ = chkpt["model"] # remove unneeded keys lowerCAmelCase_ = [ "decoder.version", ] for k in ignore_keys: model_state_dict.pop(__a , __a ) lowerCAmelCase_ = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith("output_projection.weight" ): lowerCAmelCase_ = model_state_dict.pop(__a ) else: lowerCAmelCase_ = model_state_dict.pop(__a ) lowerCAmelCase_ = BioGptConfig.from_pretrained(__a ) lowerCAmelCase_ = BioGptForCausalLM(__a ) # check that it loads ok model_new.load_state_dict(__a ) # save lowerCAmelCase_ = os.path.join(__a , __a ) print(F"Generating {pytorch_weights_dump_path}" ) torch.save(__a , __a ) print("Conversion is done!" ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--biogpt_checkpoint_path''', default=None, type=str, required=True, help=( '''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,''' ''' bpecodes, etc.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCamelCase__ = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
22
from __future__ import annotations def A(__a: dict , __a: str ): lowerCAmelCase_ , lowerCAmelCase_ = set(__a ), [start] while stack: lowerCAmelCase_ = stack.pop() explored.add(__a ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__a ) return explored lowerCamelCase__ = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
22
1
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = MobileBertTokenizer lowerCamelCase__ = MobileBertTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = filter_non_english lowerCamelCase__ = '''google/mobilebert-uncased''' def __a ( self ) -> Optional[Any]: super().setUp() lowerCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) lowerCAmelCase_ = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def __a ( self , _a ) -> Any: lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = "unwanted, running" return input_text, output_text def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] ) def __a ( self ) -> Tuple: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # With lower casing lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __a ( self ) -> Dict: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> List[str]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __a ( self ) -> Any: lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCAmelCase_ = {} for i, token in enumerate(_a ): lowerCAmelCase_ = i lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def __a ( self ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __a ( self ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __a ( self ) -> Dict: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def __a ( self ) -> Any: lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __a ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." lowerCAmelCase_ = tokenizer_r.encode_plus( _a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , ) lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False lowerCAmelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = ["的", "人", "有"] lowerCAmelCase_ = "".join(_a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = False lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase_ = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(_a ) ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a )
22
def A(__a: Tuple ): lowerCAmelCase_ = len(__a ) while cur > 1: # Find the maximum number in arr lowerCAmelCase_ = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi lowerCAmelCase_ = arr[mi::-1] + arr[mi + 1 : len(__a )] # Reverse whole list lowerCAmelCase_ = arr[cur - 1 :: -1] + arr[cur : len(__a )] cur -= 1 return arr if __name__ == "__main__": lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCamelCase__ = [int(item) for item in user_input.split(''',''')] print(pancake_sort(unsorted))
22
1
import functools from typing import Any def A(__a: str , __a: list[str] ): # Validation if not isinstance(__a , __a ) or len(__a ) == 0: raise ValueError("the string should be not empty string" ) if not isinstance(__a , __a ) or not all( isinstance(__a , __a ) and len(__a ) > 0 for item in words ): raise ValueError("the words should be a list of non-empty strings" ) # Build trie lowerCAmelCase_ = {} lowerCAmelCase_ = "WORD_KEEPER" for word in words: lowerCAmelCase_ = trie for c in word: if c not in trie_node: lowerCAmelCase_ = {} lowerCAmelCase_ = trie_node[c] lowerCAmelCase_ = True lowerCAmelCase_ = len(__a ) # Dynamic programming method @functools.cache def is_breakable(__a: int ) -> bool: if index == len_string: return True lowerCAmelCase_ = trie for i in range(__a , __a ): lowerCAmelCase_ = trie_node.get(string[i] , __a ) if trie_node is None: return False if trie_node.get(__a , __a ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
22
import string from math import logaa def A(__a: str , __a: str ): lowerCAmelCase_ = document.translate( str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" ) lowerCAmelCase_ = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def A(__a: str , __a: str ): lowerCAmelCase_ = corpus.lower().translate( str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with '' lowerCAmelCase_ = corpus_without_punctuation.split("\n" ) lowerCAmelCase_ = term.lower() return (len([doc for doc in docs if term in doc] ), len(__a )) def A(__a: int , __a: int , __a: List[Any]=False ): if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ) , 3 ) def A(__a: int , __a: int ): return round(tf * idf , 3 )
22
1
import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCamelCase__ = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. lowerCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS) lowerCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` lowerCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') lowerCamelCase__ = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def A(__a: int ): lowerCAmelCase_ = None # source code of `config_class` lowerCAmelCase_ = inspect.getsource(__a ) lowerCAmelCase_ = _re_checkpoint.findall(__a ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("/" ): lowerCAmelCase_ = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link lowerCAmelCase_ = F"https://huggingface.co/{ckpt_name}" if ckpt_link == ckpt_link_from_name: lowerCAmelCase_ = ckpt_name break return checkpoint def A(): lowerCAmelCase_ = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue lowerCAmelCase_ = get_checkpoint_from_config_class(__a ) lowerCAmelCase_ = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(__a ) if len(__a ) > 0: lowerCAmelCase_ = "\n".join(sorted(__a ) ) raise ValueError(F"The following configurations don't contain any valid checkpoint:\n{message}" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
22
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
22
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''post_extract_proj''': '''feature_projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.upsample.0''': '''encoder.upsample.projection''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def A(__a: Dict , __a: Tuple , __a: int , __a: Optional[Any] , __a: Optional[int] ): for attribute in key.split("." ): lowerCAmelCase_ = getattr(__a , __a ) if weight_type is not None: lowerCAmelCase_ = getattr(__a , __a ).shape else: lowerCAmelCase_ = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": lowerCAmelCase_ = value elif weight_type == "weight_g": lowerCAmelCase_ = value elif weight_type == "weight_v": lowerCAmelCase_ = value elif weight_type == "bias": lowerCAmelCase_ = value else: lowerCAmelCase_ = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def A(__a: Optional[int] , __a: int , __a: List[str] ): lowerCAmelCase_ = [] lowerCAmelCase_ = fairseq_model.state_dict() lowerCAmelCase_ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): lowerCAmelCase_ = False if "conv_layers" in name: load_conv_layer( __a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , ) lowerCAmelCase_ = True else: for key, mapped_key in MAPPING.items(): lowerCAmelCase_ = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: lowerCAmelCase_ = True if "*" in mapped_key: lowerCAmelCase_ = name.split(__a )[0].split("." )[-2] lowerCAmelCase_ = mapped_key.replace("*" , __a ) if "weight_g" in name: lowerCAmelCase_ = "weight_g" elif "weight_v" in name: lowerCAmelCase_ = "weight_v" elif "weight" in name: lowerCAmelCase_ = "weight" elif "bias" in name: lowerCAmelCase_ = "bias" else: lowerCAmelCase_ = None set_recursively(__a , __a , __a , __a , __a ) continue if not is_used: unused_weights.append(__a ) logger.warning(F"Unused weights: {unused_weights}" ) def A(__a: int , __a: Dict , __a: Dict , __a: Any , __a: Union[str, Any] ): lowerCAmelCase_ = full_name.split("conv_layers." )[-1] lowerCAmelCase_ = name.split("." ) lowerCAmelCase_ = int(items[0] ) lowerCAmelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) lowerCAmelCase_ = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) lowerCAmelCase_ = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) lowerCAmelCase_ = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) lowerCAmelCase_ = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(__a ) def A(__a: Optional[int] , __a: int ): lowerCAmelCase_ = SEWConfig() if is_finetuned: lowerCAmelCase_ = model.wav_encoder.wav_model.cfg else: lowerCAmelCase_ = model.cfg lowerCAmelCase_ = fs_config.conv_bias lowerCAmelCase_ = eval(fs_config.conv_feature_layers ) lowerCAmelCase_ = [x[0] for x in conv_layers] lowerCAmelCase_ = [x[1] for x in conv_layers] lowerCAmelCase_ = [x[2] for x in conv_layers] lowerCAmelCase_ = "gelu" lowerCAmelCase_ = "layer" if fs_config.extractor_mode == "layer_norm" else "group" lowerCAmelCase_ = 0.0 lowerCAmelCase_ = fs_config.activation_fn.name lowerCAmelCase_ = fs_config.encoder_embed_dim lowerCAmelCase_ = 0.02 lowerCAmelCase_ = fs_config.encoder_ffn_embed_dim lowerCAmelCase_ = 1E-5 lowerCAmelCase_ = fs_config.encoder_layerdrop lowerCAmelCase_ = fs_config.encoder_attention_heads lowerCAmelCase_ = fs_config.conv_pos_groups lowerCAmelCase_ = fs_config.conv_pos lowerCAmelCase_ = len(__a ) lowerCAmelCase_ = fs_config.encoder_layers lowerCAmelCase_ = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: lowerCAmelCase_ = model.cfg lowerCAmelCase_ = fs_config.final_dropout lowerCAmelCase_ = fs_config.layerdrop lowerCAmelCase_ = fs_config.activation_dropout lowerCAmelCase_ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 lowerCAmelCase_ = fs_config.attention_dropout lowerCAmelCase_ = fs_config.dropout_input lowerCAmelCase_ = fs_config.dropout lowerCAmelCase_ = fs_config.mask_channel_length lowerCAmelCase_ = fs_config.mask_channel_prob lowerCAmelCase_ = fs_config.mask_length lowerCAmelCase_ = fs_config.mask_prob lowerCAmelCase_ = "Wav2Vec2FeatureExtractor" lowerCAmelCase_ = "Wav2Vec2CTCTokenizer" return config @torch.no_grad() def A(__a: str , __a: Union[str, Any] , __a: Dict=None , __a: List[Any]=None , __a: Tuple=True ): if is_finetuned: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: lowerCAmelCase_ = SEWConfig.from_pretrained(__a ) else: lowerCAmelCase_ = convert_config(model[0] , __a ) lowerCAmelCase_ = model[0].eval() lowerCAmelCase_ = True if config.feat_extract_norm == "layer" else False lowerCAmelCase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , ) if is_finetuned: if dict_path: lowerCAmelCase_ = Dictionary.load(__a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCAmelCase_ = target_dict.pad_index lowerCAmelCase_ = target_dict.bos_index lowerCAmelCase_ = target_dict.pad_index lowerCAmelCase_ = target_dict.bos_index lowerCAmelCase_ = target_dict.eos_index lowerCAmelCase_ = len(target_dict.symbols ) lowerCAmelCase_ = os.path.join(__a , "vocab.json" ) if not os.path.isdir(__a ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) ) return os.makedirs(__a , exist_ok=__a ) with open(__a , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , __a ) lowerCAmelCase_ = WavaVecaCTCTokenizer( __a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , ) lowerCAmelCase_ = WavaVecaProcessor(feature_extractor=__a , tokenizer=__a ) processor.save_pretrained(__a ) lowerCAmelCase_ = SEWForCTC(__a ) else: lowerCAmelCase_ = SEWModel(__a ) feature_extractor.save_pretrained(__a ) recursively_load_weights(__a , __a , __a ) hf_model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) lowerCamelCase__ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
22
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Tuple: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a ) if visual_prompt is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if images is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if visual_prompt is not None and images is not None: lowerCAmelCase_ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase_ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
22
1
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def A(__a: Dict , __a: List[str]=None ): require_version(deps[pkg] , __a )
22
import datasets lowerCamelCase__ = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' lowerCamelCase__ = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' lowerCamelCase__ = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def A(__a: Dict , __a: Union[str, Any] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def __a ( self , _a , _a ) -> List[str]: return {"accuracy": simple_accuracy(_a , _a )}
22
1
from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class __magic_name__ (unittest.TestCase ): @slow def __a ( self ) -> Any: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = TFAutoModel.from_pretrained(_a , from_pt=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = AutoModel.from_pretrained(_a , from_tf=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def __a ( self ) -> List[Any]: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = TFAutoModelForPreTraining.from_pretrained(_a , from_pt=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = AutoModelForPreTraining.from_pretrained(_a , from_tf=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def __a ( self ) -> str: for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = TFAutoModelForCausalLM.from_pretrained(_a , from_pt=_a ) lowerCAmelCase_ , lowerCAmelCase_ = TFAutoModelForCausalLM.from_pretrained( _a , output_loading_info=_a , from_pt=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained(_a , from_tf=_a ) lowerCAmelCase_ , lowerCAmelCase_ = AutoModelForCausalLM.from_pretrained( _a , output_loading_info=_a , from_tf=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def __a ( self ) -> Dict: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(_a , from_pt=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = AutoModelWithLMHead.from_pretrained(_a , from_tf=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def __a ( self ) -> Any: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained(_a , from_pt=_a ) lowerCAmelCase_ , lowerCAmelCase_ = TFAutoModelForMaskedLM.from_pretrained( _a , output_loading_info=_a , from_pt=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = AutoModelForMaskedLM.from_pretrained(_a , from_tf=_a ) lowerCAmelCase_ , lowerCAmelCase_ = AutoModelForMaskedLM.from_pretrained( _a , output_loading_info=_a , from_tf=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def __a ( self ) -> str: for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(_a , from_pt=_a ) lowerCAmelCase_ , lowerCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained( _a , output_loading_info=_a , from_pt=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(_a , from_tf=_a ) lowerCAmelCase_ , lowerCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained( _a , output_loading_info=_a , from_tf=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def __a ( self ) -> Any: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = TFAutoModelForSequenceClassification.from_pretrained(_a , from_pt=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(_a , from_tf=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) @slow def __a ( self ) -> Any: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = TFAutoModelForQuestionAnswering.from_pretrained(_a , from_pt=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) lowerCAmelCase_ = AutoModelForQuestionAnswering.from_pretrained(_a , from_tf=_a ) self.assertIsNotNone(_a ) self.assertIsInstance(_a , _a ) def __a ( self ) -> str: lowerCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(_a , from_pt=_a ) self.assertIsInstance(_a , _a ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=_a ) , 14410 ) lowerCAmelCase_ = AutoModelWithLMHead.from_pretrained(_a , from_tf=_a ) self.assertIsInstance(_a , _a ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=_a ) , 14410 ) def __a ( self ) -> List[str]: lowerCAmelCase_ = TFAutoModelWithLMHead.from_pretrained(_a , from_pt=_a ) self.assertIsInstance(_a , _a ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=_a ) , 14410 ) lowerCAmelCase_ = AutoModelWithLMHead.from_pretrained(_a , from_tf=_a ) self.assertIsInstance(_a , _a ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=_a ) , 14410 )
22
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
22
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''pixel_values'''] def __init__( self , _a = True , _a = None , _a = None , _a = PILImageResampling.BILINEAR , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ) -> None: super().__init__(**_a ) lowerCAmelCase_ = size if size is not None else {"shortest_edge": 384} lowerCAmelCase_ = get_size_dict(_a , default_to_square=_a ) lowerCAmelCase_ = do_resize lowerCAmelCase_ = size # Default value set here for backwards compatibility where the value in config is None lowerCAmelCase_ = crop_pct if crop_pct is not None else 224 / 256 lowerCAmelCase_ = resample lowerCAmelCase_ = do_rescale lowerCAmelCase_ = rescale_factor lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def __a ( self , _a , _a , _a , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray: lowerCAmelCase_ = get_size_dict(_a , default_to_square=_a ) if "shortest_edge" not in size: raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}" ) lowerCAmelCase_ = size["shortest_edge"] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct lowerCAmelCase_ = int(shortest_edge / crop_pct ) lowerCAmelCase_ = get_resize_output_image_size(_a , size=_a , default_to_square=_a ) lowerCAmelCase_ = resize(image=_a , size=_a , resample=_a , data_format=_a , **_a ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_a , size=(shortest_edge, shortest_edge) , data_format=_a , **_a ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _a , size=(shortest_edge, shortest_edge) , resample=_a , data_format=_a , **_a ) def __a ( self , _a , _a , _a = None , **_a , ) -> Any: return rescale(_a , scale=_a , data_format=_a , **_a ) def __a ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def __a ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ = crop_pct if crop_pct is not None else self.crop_pct lowerCAmelCase_ = resample if resample is not None else self.resample lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ = image_std if image_std is not None else self.image_std lowerCAmelCase_ = size if size is not None else self.size lowerCAmelCase_ = get_size_dict(_a , default_to_square=_a ) lowerCAmelCase_ = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError("crop_pct must be specified if size < 384." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. lowerCAmelCase_ = [to_numpy_array(_a ) for image in images] if do_resize: lowerCAmelCase_ = [self.resize(image=_a , size=_a , crop_pct=_a , resample=_a ) for image in images] if do_rescale: lowerCAmelCase_ = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: lowerCAmelCase_ = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] lowerCAmelCase_ = [to_channel_dimension_format(_a , _a ) for image in images] lowerCAmelCase_ = {"pixel_values": images} return BatchFeature(data=_a , tensor_type=_a )
22
def A(__a: Optional[Any] ): lowerCAmelCase_ = len(__a ) lowerCAmelCase_ = sum(__a ) lowerCAmelCase_ = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): lowerCAmelCase_ = True for i in range(1 , s + 1 ): lowerCAmelCase_ = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): lowerCAmelCase_ = dp[i][j - 1] if arr[i - 1] <= j: lowerCAmelCase_ = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: lowerCAmelCase_ = s - 2 * j break return diff
22
1
import re from filelock import FileLock try: import nltk lowerCamelCase__ = True except (ImportError, ModuleNotFoundError): lowerCamelCase__ = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def A(__a: str ): re.sub("<n>" , "" , __a ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__a ) )
22
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
22
1
def A(__a: list ): if len(__a ) <= 1: return lst lowerCAmelCase_ = 1 while i < len(__a ): if lst[i - 1] <= lst[i]: i += 1 else: lowerCAmelCase_ , lowerCAmelCase_ = lst[i], lst[i - 1] i -= 1 if i == 0: lowerCAmelCase_ = 1 return lst if __name__ == "__main__": lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCamelCase__ = [int(item) for item in user_input.split(''',''')] print(gnome_sort(unsorted))
22
import re from filelock import FileLock try: import nltk lowerCamelCase__ = True except (ImportError, ModuleNotFoundError): lowerCamelCase__ = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def A(__a: str ): re.sub("<n>" , "" , __a ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__a ) )
22
1
from abc import ABC, abstractmethod from argparse import ArgumentParser class __magic_name__ (__lowercase ): @staticmethod @abstractmethod def __a ( _a ) -> List[str]: raise NotImplementedError() @abstractmethod def __a ( self ) -> Any: raise NotImplementedError()
22
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
from __future__ import annotations from statistics import mean def A(__a: list[int] , __a: list[int] , __a: int ): lowerCAmelCase_ = [0] * no_of_processes lowerCAmelCase_ = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(__a ): lowerCAmelCase_ = burst_time[i] lowerCAmelCase_ = [] lowerCAmelCase_ = 0 lowerCAmelCase_ = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: lowerCAmelCase_ = [] lowerCAmelCase_ = -1 for i in range(__a ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(__a ) if len(__a ) > 0: lowerCAmelCase_ = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: lowerCAmelCase_ = i total_time += burst_time[target_process] completed += 1 lowerCAmelCase_ = 0 lowerCAmelCase_ = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def A(__a: list[int] , __a: int , __a: list[int] ): lowerCAmelCase_ = [0] * no_of_processes for i in range(__a ): lowerCAmelCase_ = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print('''[TEST CASE 01]''') lowerCamelCase__ = 4 lowerCamelCase__ = [2, 5, 3, 7] lowerCamelCase__ = [0, 0, 0, 0] lowerCamelCase__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowerCamelCase__ = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''') for i, process_id in enumerate(list(range(1, 5))): print( F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t''' F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}''' ) print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''') print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
22
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bertabs''' def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_pos lowerCAmelCase_ = enc_layers lowerCAmelCase_ = enc_hidden_size lowerCAmelCase_ = enc_heads lowerCAmelCase_ = enc_ff_size lowerCAmelCase_ = enc_dropout lowerCAmelCase_ = dec_layers lowerCAmelCase_ = dec_hidden_size lowerCAmelCase_ = dec_heads lowerCAmelCase_ = dec_ff_size lowerCAmelCase_ = dec_dropout
22
1
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = RobertaTokenizer lowerCamelCase__ = RobertaTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = {'''cls_token''': '''<s>'''} def __a ( self ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowerCAmelCase_ = dict(zip(_a , range(len(_a ) ) ) ) lowerCAmelCase_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowerCAmelCase_ = {"unk_token": "<unk>"} lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_a ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_a ) ) def __a ( self , **_a ) -> Any: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a ) def __a ( self , **_a ) -> List[Any]: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_a ) def __a ( self , _a ) -> Dict: lowerCAmelCase_ = "lower newer" lowerCAmelCase_ = "lower newer" return input_text, output_text def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase_ = "lower newer" lowerCAmelCase_ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] lowerCAmelCase_ = tokenizer.tokenize(_a ) # , add_prefix_space=True) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokens + [tokenizer.unk_token] lowerCAmelCase_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) def __a ( self ) -> int: lowerCAmelCase_ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=_a ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=_a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def __a ( self ) -> int: lowerCAmelCase_ = self.tokenizer_class.from_pretrained("roberta-base" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode( "sequence builders" , add_special_tokens=_a , add_prefix_space=_a ) lowerCAmelCase_ = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=_a , add_prefix_space=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = "Encode this sequence." lowerCAmelCase_ = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a ) lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a ) lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_a , _a ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_a , _a ) # Testing spaces after special tokens lowerCAmelCase_ = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(_a , lstrip=_a , rstrip=_a )} ) # mask token has a left space lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_a ) lowerCAmelCase_ = "Encode <mask> sequence" lowerCAmelCase_ = "Encode <mask>sequence" lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = encoded.index(_a ) lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = encoded.index(_a ) lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_a , _a ) def __a ( self ) -> Any: pass def __a ( self ) -> Optional[int]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = "A, <mask> AllenNLP sentence." lowerCAmelCase_ = tokenizer_r.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a ) lowerCAmelCase_ = tokenizer_p.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( _a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( _a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def __a ( self ) -> Any: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) lowerCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) lowerCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , _a ) self.assertEqual(post_processor_state["add_prefix_space"] , _a ) self.assertEqual(post_processor_state["trim_offsets"] , _a ) def __a ( self ) -> Optional[int]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name` lowerCAmelCase_ = f"{text_of_1_token} {text_of_1_token}" lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , ) lowerCAmelCase_ = f" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained( _a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a ) lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , )
22
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def A(__a: Tuple , __a: Union[str, Any] ): lowerCAmelCase_ = checkpoint lowerCAmelCase_ = {} lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] lowerCAmelCase_ = vae_state_dict["quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["quant_conv.bias"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"] lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a ) } # Retrieves the keys for the decoder up blocks only lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) lowerCAmelCase_ = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a ) } for i in range(__a ): lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) lowerCAmelCase_ = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) for i in range(__a ): lowerCAmelCase_ = num_up_blocks - 1 - i lowerCAmelCase_ = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.weight" ] lowerCAmelCase_ = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] lowerCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] lowerCAmelCase_ = renew_vae_resnet_paths(__a ) lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] lowerCAmelCase_ = renew_vae_attention_paths(__a ) lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a ) conv_attn_to_linear(__a ) return new_checkpoint def A(__a: str , __a: str , ): # Only support V1 lowerCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) lowerCAmelCase_ = io.BytesIO(r.content ) lowerCAmelCase_ = OmegaConf.load(__a ) lowerCAmelCase_ = 512 lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open lowerCAmelCase_ = {} with safe_open(__a , framework="pt" , device="cpu" ) as f: for key in f.keys(): lowerCAmelCase_ = f.get_tensor(__a ) else: lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"] # Convert the VAE model. lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a ) lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a ) lowerCAmelCase_ = AutoencoderKL(**__a ) vae.load_state_dict(__a ) vae.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''') lowerCamelCase__ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
22
1
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys lowerCamelCase__ = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''') lowerCamelCase__ = subprocess.check_output(F'''git diff --name-only {fork_point_sha}'''.split()).decode('''utf-8''').split() lowerCamelCase__ = '''|'''.join(sys.argv[1:]) lowerCamelCase__ = re.compile(RF'''^({joined_dirs}).*?\.py$''') lowerCamelCase__ = [x for x in modified_files if regex.match(x)] print(''' '''.join(relevant_modified_files), end='''''')
22
def A(): return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] lowerCamelCase__ = generate_large_matrix() lowerCamelCase__ = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A(__a: list[list[int]] ): assert all(row == sorted(__a , reverse=__a ) for row in grid ) assert all(list(__a ) == sorted(__a , reverse=__a ) for col in zip(*__a ) ) def A(__a: list[int] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(__a ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCAmelCase_ = (left + right) // 2 lowerCAmelCase_ = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCAmelCase_ = mid + 1 else: lowerCAmelCase_ = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__a ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 lowerCAmelCase_ = len(grid[0] ) for i in range(len(__a ) ): lowerCAmelCase_ = find_negative_index(grid[i][:bound] ) total += bound return (len(__a ) * len(grid[0] )) - total def A(__a: list[list[int]] ): return len([number for row in grid for number in row if number < 0] ) def A(__a: list[list[int]] ): lowerCAmelCase_ = 0 for row in grid: for i, number in enumerate(__a ): if number < 0: total += len(__a ) - i break return total def A(): from timeit import timeit print("Running benchmarks" ) lowerCAmelCase_ = ( "from __main__ import count_negatives_binary_search, " "count_negatives_brute_force, count_negatives_brute_force_with_break, grid" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCAmelCase_ = timeit(F"{func}(grid=grid)" , setup=__a , number=500 ) print(F"{func}() took {time:0.4f} seconds" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
22
1
import torch from transformers import AutoModel class __magic_name__ (torch.nn.Module ): def __init__( self , _a="sayef/fsner-bert-base-uncased" ) -> Optional[Any]: super(_a , self ).__init__() lowerCAmelCase_ = AutoModel.from_pretrained(_a , return_dict=_a ) lowerCAmelCase_ = torch.nn.CosineSimilarity(3 , 1E-08 ) lowerCAmelCase_ = torch.nn.Softmax(dim=1 ) def __a ( self , **_a ) -> Dict: return self.bert(**_a ).last_hidden_state def __a ( self , _a ) -> str: return token_embeddings.sum(2 , keepdim=_a ) def __a ( self , _a , _a , _a=1 ) -> Any: return self.softmax(T * self.cos(_a , _a ) ) def __a ( self , _a , _a ) -> Optional[Any]: lowerCAmelCase_ = W_supports["sizes"].tolist() lowerCAmelCase_ = W_supports["start_token_id"].item() lowerCAmelCase_ = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] lowerCAmelCase_ = self.BERT(**_a ) lowerCAmelCase_ = self.BERT(**_a ) lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = W_supports["input_ids"] == start_token_id lowerCAmelCase_ = W_supports["input_ids"] == end_token_id for i, size in enumerate(_a ): if i == 0: lowerCAmelCase_ = 0 else: lowerCAmelCase_ = support_sizes[i - 1] lowerCAmelCase_ = S[s : s + size][start_token_masks[s : s + size]] lowerCAmelCase_ = S[s : s + size][end_token_masks[s : s + size]] lowerCAmelCase_ = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) lowerCAmelCase_ = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: lowerCAmelCase_ = torch.vstack((p_starts, p_start) ) lowerCAmelCase_ = torch.vstack((p_ends, p_end) ) else: lowerCAmelCase_ = p_start lowerCAmelCase_ = p_end return p_starts, p_ends
22
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCamelCase__ = logging.get_logger(__name__) def A(__a: Dict ): lowerCAmelCase_ = r"\w+[.]\d+" lowerCAmelCase_ = re.findall(__a , __a ) for pat in pats: lowerCAmelCase_ = key.replace(__a , "_".join(pat.split("." ) ) ) return key def A(__a: str , __a: Tuple , __a: List[Any] ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) if ( any("norm" in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("scale",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase_ = pt_tuple_key[:-1] + ("embedding",) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase_ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase_ = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight": lowerCAmelCase_ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase_ = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase_ = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A(__a: Dict , __a: Any , __a: List[Any]=42 ): # Step 1: Convert pytorch tensor to numpy lowerCAmelCase_ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase_ = flax_model.init_weights(PRNGKey(__a ) ) lowerCAmelCase_ = flatten_dict(__a ) lowerCAmelCase_ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase_ = rename_key(__a ) lowerCAmelCase_ = tuple(renamed_pt_key.split("." ) ) # Correctly rename weight parameters lowerCAmelCase_ , lowerCAmelCase_ = rename_key_and_reshape_tensor(__a , __a , __a ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # also add unexpected weight so that warning is thrown lowerCAmelCase_ = jnp.asarray(__a ) return unflatten_dict(__a )
22
1
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''LayoutLMv3ImageProcessor''' lowerCamelCase__ = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Optional[int]: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." ) # first, apply the image processor lowerCAmelCase_ = self.image_processor(images=_a , return_tensors=_a ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(_a , _a ): lowerCAmelCase_ = [text] # add batch dimension (as the image processor always adds a batch dimension) lowerCAmelCase_ = features["words"] lowerCAmelCase_ = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , ) # add pixel values lowerCAmelCase_ = features.pop("pixel_values" ) if return_overflowing_tokens is True: lowerCAmelCase_ = self.get_overflowing_images(_a , encoded_inputs["overflow_to_sample_mapping"] ) lowerCAmelCase_ = images return encoded_inputs def __a ( self , _a , _a ) -> int: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image lowerCAmelCase_ = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(_a ) != len(_a ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" f" {len(_a )} and {len(_a )}" ) return images_with_overflow def __a ( self , *_a , **_a ) -> Optional[Any]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> str: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> int: return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def __a ( self ) -> List[Any]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
22
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __magic_name__ : def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=24 , _a=2 , _a=6 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.0_2 , _a=3 , _a=None , _a=1000 , ) -> Dict: lowerCAmelCase_ = parent lowerCAmelCase_ = batch_size lowerCAmelCase_ = seq_length lowerCAmelCase_ = is_training lowerCAmelCase_ = use_input_mask lowerCAmelCase_ = use_token_type_ids lowerCAmelCase_ = use_labels lowerCAmelCase_ = vocab_size lowerCAmelCase_ = hidden_size lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = hidden_dropout_prob lowerCAmelCase_ = attention_probs_dropout_prob lowerCAmelCase_ = max_position_embeddings lowerCAmelCase_ = type_vocab_size lowerCAmelCase_ = type_sequence_label_size lowerCAmelCase_ = initializer_range lowerCAmelCase_ = num_labels lowerCAmelCase_ = scope lowerCAmelCase_ = range_bbox def __a ( self ) -> Any: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCAmelCase_ = bbox[i, j, 3] lowerCAmelCase_ = bbox[i, j, 1] lowerCAmelCase_ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCAmelCase_ = bbox[i, j, 2] lowerCAmelCase_ = bbox[i, j, 0] lowerCAmelCase_ = t lowerCAmelCase_ = None if self.use_input_mask: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) lowerCAmelCase_ = None if self.use_token_type_ids: lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase_ = None lowerCAmelCase_ = None if self.use_labels: lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase_ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def __a ( self ) -> List[str]: return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __a ( self , _a , _a , _a , _a , _a , _a , _a , ) -> List[str]: lowerCAmelCase_ = LiltModel(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model(_a , bbox=_a , attention_mask=_a , token_type_ids=_a ) lowerCAmelCase_ = model(_a , bbox=_a , token_type_ids=_a ) lowerCAmelCase_ = model(_a , bbox=_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a , ) -> Dict: lowerCAmelCase_ = self.num_labels lowerCAmelCase_ = LiltForTokenClassification(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model( _a , bbox=_a , attention_mask=_a , token_type_ids=_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self , _a , _a , _a , _a , _a , _a , _a , ) -> Dict: lowerCAmelCase_ = LiltForQuestionAnswering(config=_a ) model.to(_a ) model.eval() lowerCAmelCase_ = model( _a , bbox=_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self ) -> Dict: lowerCAmelCase_ = self.prepare_config_and_inputs() ( ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ( lowerCAmelCase_ ) , ) = config_and_inputs lowerCAmelCase_ = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __magic_name__ (__lowercase , __lowercase , __lowercase , unittest.TestCase ): lowerCamelCase__ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ = ( { '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False def __a ( self , _a , _a , _a , _a , _a ) -> Optional[int]: return True def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = LiltModelTester(self ) lowerCAmelCase_ = ConfigTester(self , config_class=_a , hidden_size=37 ) def __a ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def __a ( self ) -> Tuple: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def __a ( self ) -> List[str]: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase_ = type self.model_tester.create_and_check_model(*_a ) def __a ( self ) -> List[str]: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_a ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_a ) @slow def __a ( self ) -> Optional[Any]: for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ = LiltModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_torch @slow class __magic_name__ (unittest.TestCase ): def __a ( self ) -> Optional[int]: lowerCAmelCase_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(_a ) lowerCAmelCase_ = torch.tensor([[1, 2]] , device=_a ) lowerCAmelCase_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_a ) # forward pass with torch.no_grad(): lowerCAmelCase_ = model(input_ids=_a , bbox=_a ) lowerCAmelCase_ = torch.Size([1, 2, 768] ) lowerCAmelCase_ = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=_a , ) self.assertTrue(outputs.last_hidden_state.shape , _a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _a , atol=1E-3 ) )
22
import math def A(__a: int ): return math.sqrt(__a ) * math.sqrt(__a ) == num def A(__a: int ): lowerCAmelCase_ = 0 lowerCAmelCase_ = n while left <= right: lowerCAmelCase_ = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: lowerCAmelCase_ = mid - 1 else: lowerCAmelCase_ = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
22
1
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from packaging.version import Version, parse from accelerate.commands.config.config_args import default_config_file, load_config_from_file lowerCamelCase__ = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.''' def A(__a: List[str]=None ): if subparsers is not None: lowerCAmelCase_ = subparsers.add_parser("tpu-config" , description=_description ) else: lowerCAmelCase_ = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description ) # Core arguments lowerCAmelCase_ = parser.add_argument_group( "Config Arguments" , "Arguments that can be configured through `accelerate config`." ) config_args.add_argument( "--config_file" , type=__a , default=__a , help="Path to the config file to use for accelerate." , ) config_args.add_argument( "--tpu_name" , default=__a , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , ) config_args.add_argument( "--tpu_zone" , default=__a , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , ) lowerCAmelCase_ = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." ) pod_args.add_argument( "--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , ) pod_args.add_argument( "--command_file" , default=__a , help="The path to the file containing the commands to run on the pod on startup." , ) pod_args.add_argument( "--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , ) pod_args.add_argument( "--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , ) pod_args.add_argument( "--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , ) pod_args.add_argument( "--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." ) if subparsers is not None: parser.set_defaults(func=__a ) return parser def A(__a: str ): lowerCAmelCase_ = None # Get the default from the config file if it exists. if args.config_file is not None or os.path.isfile(__a ): lowerCAmelCase_ = load_config_from_file(args.config_file ) if not args.command_file and defaults.command_file is not None and not args.command: lowerCAmelCase_ = defaults.command_file if not args.command and defaults.commands is not None: lowerCAmelCase_ = defaults.commands if not args.tpu_name: lowerCAmelCase_ = defaults.tpu_name if not args.tpu_zone: lowerCAmelCase_ = defaults.tpu_zone if args.accelerate_version == "dev": lowerCAmelCase_ = "git+https://github.com/huggingface/accelerate.git" elif args.accelerate_version == "latest": lowerCAmelCase_ = "accelerate -U" elif isinstance(parse(args.accelerate_version ) , __a ): lowerCAmelCase_ = F"accelerate=={args.accelerate_version}" if not args.command_file and not args.command: raise ValueError("You must specify either a command file or a command to run on the pod." ) if args.command_file: with open(args.command_file , "r" ) as f: lowerCAmelCase_ = [f.read().splitlines()] # To turn list of lists into list of strings if isinstance(args.command[0] , __a ): lowerCAmelCase_ = [line for cmd in args.command for line in cmd] # Default to the shared folder and install accelerate lowerCAmelCase_ = ["cd /usr/share"] if args.install_accelerate: new_cmd += [F"pip install {args.accelerate_version}"] new_cmd += args.command lowerCAmelCase_ = "; ".join(__a ) # Then send it to gcloud # Eventually try to use google-api-core to do this instead of subprocess lowerCAmelCase_ = ["gcloud"] if args.use_alpha: cmd += ["alpha"] cmd += [ "compute", "tpus", "tpu-vm", "ssh", args.tpu_name, "--zone", args.tpu_zone, "--command", args.command, "--worker", "all", ] if args.debug: print(F"Running {' '.join(__a )}" ) return subprocess.run(__a ) print("Successfully setup pod." ) def A(): lowerCAmelCase_ = tpu_command_parser() lowerCAmelCase_ = parser.parse_args() tpu_command_launcher(__a )
22
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase__ = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('''dataclasses''') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('''importlib_metadata''') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''') def A(__a: Dict , __a: List[str]=None ): require_version(deps[pkg] , __a )
22
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''], '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''BertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BertForMaskedLM''', '''BertForMultipleChoice''', '''BertForNextSentencePrediction''', '''BertForPreTraining''', '''BertForQuestionAnswering''', '''BertForSequenceClassification''', '''BertForTokenClassification''', '''BertLayer''', '''BertLMHeadModel''', '''BertModel''', '''BertPreTrainedModel''', '''load_tf_weights_in_bert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBertEmbeddings''', '''TFBertForMaskedLM''', '''TFBertForMultipleChoice''', '''TFBertForNextSentencePrediction''', '''TFBertForPreTraining''', '''TFBertForQuestionAnswering''', '''TFBertForSequenceClassification''', '''TFBertForTokenClassification''', '''TFBertLMHeadModel''', '''TFBertMainLayer''', '''TFBertModel''', '''TFBertPreTrainedModel''', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''TFBertTokenizer'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''FlaxBertForCausalLM''', '''FlaxBertForMaskedLM''', '''FlaxBertForMultipleChoice''', '''FlaxBertForNextSentencePrediction''', '''FlaxBertForPreTraining''', '''FlaxBertForQuestionAnswering''', '''FlaxBertForSequenceClassification''', '''FlaxBertForTokenClassification''', '''FlaxBertModel''', '''FlaxBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCamelCase__ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCamelCase__ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = ''' Hello world! cécé herlolip''' lowerCamelCase__ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def A(__a: Any ): lowerCAmelCase_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A(__a: Optional[int] , __a: List[Any] , __a: Union[str, Any] ): lowerCAmelCase_ = dct.pop(__a ) lowerCAmelCase_ = val def A(__a: Tuple ): lowerCAmelCase_ = torch.load(__a , map_location="cpu" ) lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval() hub_interface.model.load_state_dict(sd["model"] ) return hub_interface def A(__a: List[str] ): lowerCAmelCase_ , lowerCAmelCase_ = emb.weight.shape lowerCAmelCase_ = nn.Linear(__a , __a , bias=__a ) lowerCAmelCase_ = emb.weight.data return lin_layer @torch.no_grad() def A(__a: Tuple , __a: Union[str, Any] , __a: str=None ): if not os.path.exists(__a ): lowerCAmelCase_ = torch.hub.load("pytorch/fairseq" , __a ).eval() else: lowerCAmelCase_ = load_xsum_checkpoint(__a ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: lowerCAmelCase_ = checkpoint_path.replace("." , "-" ) lowerCAmelCase_ = BartConfig.from_pretrained(__a ) lowerCAmelCase_ = bart.encode(__a ).unsqueeze(0 ) lowerCAmelCase_ = BartTokenizer.from_pretrained(__a ).encode(__a , return_tensors="pt" ).unsqueeze(0 ) if not torch.eq(__a , __a ).all(): raise ValueError( F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" ) if checkpoint_path == "bart.large.mnli": lowerCAmelCase_ = bart.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(__a , __a , __a ) lowerCAmelCase_ = BartForSequenceClassification(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = bart.predict("mnli" , __a , return_logits=__a ) lowerCAmelCase_ = model(__a )[0] # logits else: # no classification heads to worry about lowerCAmelCase_ = bart.model.state_dict() remove_ignore_keys_(__a ) lowerCAmelCase_ = state_dict["decoder.embed_tokens.weight"] lowerCAmelCase_ = bart.extract_features(__a ) if hf_checkpoint_name == "facebook/bart-large": lowerCAmelCase_ = BartModel(__a ).eval() model.load_state_dict(__a ) lowerCAmelCase_ = model(__a ).model[0] else: lowerCAmelCase_ = BartForConditionalGeneration(__a ).eval() # an existing summarization ckpt model.model.load_state_dict(__a ) if hasattr(__a , "lm_head" ): lowerCAmelCase_ = make_linear_from_emb(model.model.shared ) lowerCAmelCase_ = model.model(__a )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" ) Path(__a ).mkdir(exist_ok=__a ) model.save_pretrained(__a ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCamelCase__ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
22
1
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase__ = imread(R'''digital_image_processing/image_data/lena_small.jpg''') lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY) def A(): lowerCAmelCase_ = cn.convert_to_negative(__a ) # assert negative_img array for at least one True assert negative_img.any() def A(): with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img: # Work around assertion for response assert str(cc.change_contrast(__a , 110 ) ).startswith( "<PIL.Image.Image image mode=RGB size=100x100 at" ) def A(): lowerCAmelCase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def A(): lowerCAmelCase_ = imread("digital_image_processing/image_data/lena_small.jpg" , 0 ) # assert ambiguous array for all == True assert canny_img.all() lowerCAmelCase_ = canny.canny(__a ) # assert canny array for at least one True assert canny_array.any() def A(): assert gg.gaussian_filter(__a , 5 , sigma=0.9 ).all() def A(): # laplace diagonals lowerCAmelCase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) lowerCAmelCase_ = conv.img_convolve(__a , __a ).astype(__a ) assert res.any() def A(): assert med.median_filter(__a , 3 ).any() def A(): lowerCAmelCase_ , lowerCAmelCase_ = sob.sobel_filter(__a ) assert grad.any() and theta.any() def A(): lowerCAmelCase_ = sp.make_sepia(__a , 20 ) assert sepia.all() def A(__a: str = "digital_image_processing/image_data/lena_small.jpg" ): lowerCAmelCase_ = bs.Burkes(imread(__a , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def A(__a: str = "digital_image_processing/image_data/lena_small.jpg" , ): lowerCAmelCase_ = rs.NearestNeighbour(imread(__a , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def A(): lowerCAmelCase_ = "digital_image_processing/image_data/lena.jpg" # Reading the image and converting it to grayscale. lowerCAmelCase_ = imread(__a , 0 ) # Test for get_neighbors_pixel function() return not None lowerCAmelCase_ = 0 lowerCAmelCase_ = 0 lowerCAmelCase_ = image[x_coordinate][y_coordinate] lowerCAmelCase_ = lbp.get_neighbors_pixel( __a , __a , __a , __a ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowerCAmelCase_ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): lowerCAmelCase_ = lbp.local_binary_value(__a , __a , __a ) assert lbp_image.any()
22
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __magic_name__ (__lowercase , unittest.TestCase ): lowerCamelCase__ = MobileBertTokenizer lowerCamelCase__ = MobileBertTokenizerFast lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = filter_non_english lowerCamelCase__ = '''google/mobilebert-uncased''' def __a ( self ) -> Optional[Any]: super().setUp() lowerCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) lowerCAmelCase_ = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def __a ( self , _a ) -> Any: lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = "unwanted, running" return input_text, output_text def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase_ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(_a , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] ) def __a ( self ) -> Tuple: if not self.test_rust_tokenizer: return lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) # With lower casing lowerCAmelCase_ = self.get_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = self.get_rust_tokenizer(do_lower_case=_a ) lowerCAmelCase_ = "UNwant\u00E9d,running" lowerCAmelCase_ = tokenizer.tokenize(_a ) lowerCAmelCase_ = rust_tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a , add_special_tokens=_a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = self.get_rust_tokenizer() lowerCAmelCase_ = tokenizer.encode(_a ) lowerCAmelCase_ = rust_tokenizer.encode(_a ) self.assertListEqual(_a , _a ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def __a ( self ) -> Dict: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def __a ( self ) -> str: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> List[str]: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , strip_accents=_a ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def __a ( self ) -> Any: lowerCAmelCase_ = BasicTokenizer(do_lower_case=_a , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def __a ( self ) -> Any: lowerCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] lowerCAmelCase_ = {} for i, token in enumerate(_a ): lowerCAmelCase_ = i lowerCAmelCase_ = WordpieceTokenizer(vocab=_a , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def __a ( self ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def __a ( self ) -> List[str]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def __a ( self ) -> Dict: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def __a ( self ) -> Any: lowerCAmelCase_ = self.get_tokenizer() lowerCAmelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(_a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" ) lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a ) lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __a ( self ) -> Union[str, Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." lowerCAmelCase_ = tokenizer_r.encode_plus( _a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , ) lowerCAmelCase_ = tokenizer_r.do_lower_case if hasattr(_a , "do_lower_case" ) else False lowerCAmelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = ["的", "人", "有"] lowerCAmelCase_ = "".join(_a ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ = True lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a ) lowerCAmelCase_ = False lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a ) lowerCAmelCase_ = tokenizer_r.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_p.encode(_a , add_special_tokens=_a ) lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(_a ) lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(_a ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase_ = [ f"##{token}" if idx != 0 else token for idx, token in enumerate(_a ) ] self.assertListEqual(_a , _a ) self.assertListEqual(_a , _a )
22
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', '''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
import math from collections.abc import Iterator from itertools import takewhile def A(__a: int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A(): lowerCAmelCase_ = 2 while True: if is_prime(__a ): yield num num += 1 def A(__a: int = 200_0000 ): return sum(takewhile(lambda __a : x < n , prime_generator() ) ) if __name__ == "__main__": print(F'''{solution() = }''')
22
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''unispeech''' def __init__( self , _a=32 , _a=768 , _a=12 , _a=12 , _a=3072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.1 , _a=0.1 , _a=0.0_2 , _a=1E-5 , _a="group" , _a="gelu" , _a=(512, 512, 512, 512, 512, 512, 512) , _a=(5, 2, 2, 2, 2, 2, 2) , _a=(10, 3, 3, 3, 3, 2, 2) , _a=False , _a=128 , _a=16 , _a=False , _a=True , _a=0.0_5 , _a=10 , _a=2 , _a=0.0 , _a=10 , _a=0 , _a=320 , _a=2 , _a=0.1 , _a=100 , _a=256 , _a=256 , _a=0.1 , _a="mean" , _a=False , _a=False , _a=256 , _a=80 , _a=0 , _a=1 , _a=2 , _a=0.5 , **_a , ) -> Dict: super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a ) lowerCAmelCase_ = hidden_size lowerCAmelCase_ = feat_extract_norm lowerCAmelCase_ = feat_extract_activation lowerCAmelCase_ = list(_a ) lowerCAmelCase_ = list(_a ) lowerCAmelCase_ = list(_a ) lowerCAmelCase_ = conv_bias lowerCAmelCase_ = num_conv_pos_embeddings lowerCAmelCase_ = num_conv_pos_embedding_groups lowerCAmelCase_ = len(self.conv_dim ) lowerCAmelCase_ = num_hidden_layers lowerCAmelCase_ = intermediate_size lowerCAmelCase_ = hidden_act lowerCAmelCase_ = num_attention_heads lowerCAmelCase_ = hidden_dropout lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = activation_dropout lowerCAmelCase_ = feat_proj_dropout lowerCAmelCase_ = final_dropout lowerCAmelCase_ = layerdrop lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = initializer_range lowerCAmelCase_ = num_ctc_classes lowerCAmelCase_ = vocab_size lowerCAmelCase_ = do_stable_layer_norm lowerCAmelCase_ = use_weighted_layer_sum lowerCAmelCase_ = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase_ = apply_spec_augment lowerCAmelCase_ = mask_time_prob lowerCAmelCase_ = mask_time_length lowerCAmelCase_ = mask_time_min_masks lowerCAmelCase_ = mask_feature_prob lowerCAmelCase_ = mask_feature_length lowerCAmelCase_ = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowerCAmelCase_ = num_codevectors_per_group lowerCAmelCase_ = num_codevector_groups lowerCAmelCase_ = contrastive_logits_temperature lowerCAmelCase_ = feat_quantizer_dropout lowerCAmelCase_ = num_negatives lowerCAmelCase_ = codevector_dim lowerCAmelCase_ = proj_codevector_dim lowerCAmelCase_ = diversity_loss_weight # ctc loss lowerCAmelCase_ = ctc_loss_reduction lowerCAmelCase_ = ctc_zero_infinity # pretraining loss lowerCAmelCase_ = replace_prob @property def __a ( self ) -> str: return functools.reduce(operator.mul , self.conv_stride , 1 )
22
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''mobilenet_v2''' def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict: super().__init__(**_a ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = depth_multiplier lowerCAmelCase_ = depth_divisible_by lowerCAmelCase_ = min_depth lowerCAmelCase_ = expand_ratio lowerCAmelCase_ = output_stride lowerCAmelCase_ = first_layer_is_expansion lowerCAmelCase_ = finegrained_output lowerCAmelCase_ = hidden_act lowerCAmelCase_ = tf_padding lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = semantic_loss_ignore_index class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.11''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self ) -> float: return 1E-4
22
1
import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor lowerCamelCase__ = logging.get_logger(__name__) class __magic_name__ (__lowercase ): def __init__( self , *_a , **_a ) -> None: warnings.warn( "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use LayoutLMv2ImageProcessor instead." , _a , ) super().__init__(*_a , **_a )
22
from __future__ import annotations def A(__a: dict , __a: str ): lowerCAmelCase_ , lowerCAmelCase_ = set(__a ), [start] while stack: lowerCAmelCase_ = stack.pop() explored.add(__a ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__a ) return explored lowerCamelCase__ = { '''A''': ['''B''', '''C''', '''D'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F'''], '''D''': ['''B''', '''D'''], '''E''': ['''B''', '''F'''], '''F''': ['''C''', '''E''', '''G'''], '''G''': ['''F'''], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, '''A'''))
22
1
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def A(): lowerCAmelCase_ = { "repo_name": ["test_repo1", "test_repo2", "test_repo3"], "path": ["test_1.py", "test_2.py", "unit_test.py"], "content": ["a " * 20, "a " * 30, "b " * 7], } lowerCAmelCase_ = Dataset.from_dict(__a ) return dataset class __magic_name__ (__lowercase ): def __a ( self ) -> Optional[int]: lowerCAmelCase_ = get_dataset() lowerCAmelCase_ = make_duplicate_clusters(_a , 0.8_5 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def __a ( self ) -> str: lowerCAmelCase_ = get_dataset() lowerCAmelCase_ , lowerCAmelCase_ = deduplicate_dataset(_a ) self.assertEqual(len(_a ) , 2 ) print(_a ) self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 ) self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , _a )
22
def A(__a: Tuple ): lowerCAmelCase_ = len(__a ) while cur > 1: # Find the maximum number in arr lowerCAmelCase_ = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi lowerCAmelCase_ = arr[mi::-1] + arr[mi + 1 : len(__a )] # Reverse whole list lowerCAmelCase_ = arr[cur - 1 :: -1] + arr[cur : len(__a )] cur -= 1 return arr if __name__ == "__main__": lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip() lowerCamelCase__ = [int(item) for item in user_input.split(''',''')] print(pancake_sort(unsorted))
22
1
import datasets lowerCamelCase__ = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' lowerCamelCase__ = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' lowerCamelCase__ = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def A(__a: Dict , __a: Union[str, Any] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def __a ( self , _a , _a ) -> List[str]: return {"accuracy": simple_accuracy(_a , _a )}
22
import string from math import logaa def A(__a: str , __a: str ): lowerCAmelCase_ = document.translate( str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" ) lowerCAmelCase_ = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def A(__a: str , __a: str ): lowerCAmelCase_ = corpus.lower().translate( str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with '' lowerCAmelCase_ = corpus_without_punctuation.split("\n" ) lowerCAmelCase_ = term.lower() return (len([doc for doc in docs if term in doc] ), len(__a )) def A(__a: int , __a: int , __a: List[Any]=False ): if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ) , 3 ) def A(__a: int , __a: int ): return round(tf * idf , 3 )
22
1
lowerCamelCase__ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} lowerCamelCase__ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def A(__a: dict[int, list[int]] , __a: int , __a: list[bool] ): lowerCAmelCase_ = True lowerCAmelCase_ = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(__a , __a , __a ) order.append(__a ) return order def A(__a: dict[int, list[int]] , __a: int , __a: list[bool] ): lowerCAmelCase_ = True lowerCAmelCase_ = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(__a , __a , __a ) return component def A(__a: dict[int, list[int]] ): lowerCAmelCase_ = len(__a ) * [False] lowerCAmelCase_ = {vert: [] for vert in range(len(__a ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(__a ) lowerCAmelCase_ = [] for i, was_visited in enumerate(__a ): if not was_visited: order += topology_sort(__a , __a , __a ) lowerCAmelCase_ = [] lowerCAmelCase_ = len(__a ) * [False] for i in range(len(__a ) ): lowerCAmelCase_ = order[len(__a ) - i - 1] if not visited[vert]: lowerCAmelCase_ = find_components(__a , __a , __a ) components_list.append(__a ) return components_list
22
import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef lowerCamelCase__ = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def A(__a: str , __a: List[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) return (preds == labels).mean() def A(__a: Any , __a: Any ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = simple_accuracy(__a , __a ) lowerCAmelCase_ = fa_score(y_true=__a , y_pred=__a ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def A(__a: List[str] , __a: Optional[int] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) lowerCAmelCase_ = pearsonr(__a , __a )[0] lowerCAmelCase_ = spearmanr(__a , __a )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def A(__a: Union[str, Any] , __a: Any , __a: str ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) assert len(__a ) == len(__a ), F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" if task_name == "cola": return {"mcc": matthews_corrcoef(__a , __a )} elif task_name == "sst-2": return {"acc": simple_accuracy(__a , __a )} elif task_name == "mrpc": return acc_and_fa(__a , __a ) elif task_name == "sts-b": return pearson_and_spearman(__a , __a ) elif task_name == "qqp": return acc_and_fa(__a , __a ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(__a , __a )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(__a , __a )} elif task_name == "qnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "rte": return {"acc": simple_accuracy(__a , __a )} elif task_name == "wnli": return {"acc": simple_accuracy(__a , __a )} elif task_name == "hans": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a ) def A(__a: int , __a: Optional[Any] , __a: Optional[Any] ): warnings.warn(__a , __a ) requires_backends(__a , "sklearn" ) if len(__a ) != len(__a ): raise ValueError(F"Predictions and labels have mismatched lengths {len(__a )} and {len(__a )}" ) if task_name == "xnli": return {"acc": simple_accuracy(__a , __a )} else: raise KeyError(__a )
22
1
import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor lowerCamelCase__ = logging.get_logger(__name__) class __magic_name__ (__lowercase ): def __init__( self , *_a , **_a ) -> None: warnings.warn( "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use BeitImageProcessor instead." , _a , ) super().__init__(*_a , **_a )
22
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''image_processor''', '''tokenizer'''] lowerCamelCase__ = '''ViTImageProcessor''' lowerCamelCase__ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''') def __init__( self , _a=None , _a=None , **_a ) -> Tuple: lowerCAmelCase_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , _a , ) lowerCAmelCase_ = kwargs.pop("feature_extractor" ) lowerCAmelCase_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(_a , _a ) def __call__( self , _a=None , _a=None , _a=None , _a=None , **_a ) -> Dict: if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images." ) if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." ) if text is not None: lowerCAmelCase_ = self.tokenizer(_a , return_tensors=_a , **_a ) if visual_prompt is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if images is not None: lowerCAmelCase_ = self.image_processor(_a , return_tensors=_a , **_a ) if visual_prompt is not None and images is not None: lowerCAmelCase_ = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: lowerCAmelCase_ = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: lowerCAmelCase_ = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**_a ) , tensor_type=_a ) def __a ( self , *_a , **_a ) -> List[str]: return self.tokenizer.batch_decode(*_a , **_a ) def __a ( self , *_a , **_a ) -> Optional[int]: return self.tokenizer.decode(*_a , **_a ) @property def __a ( self ) -> List[str]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _a , ) return self.image_processor_class @property def __a ( self ) -> Optional[Any]: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _a , ) return self.image_processor
22
1
def A(__a: Optional[Any] ): lowerCAmelCase_ = len(__a ) lowerCAmelCase_ = sum(__a ) lowerCAmelCase_ = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): lowerCAmelCase_ = True for i in range(1 , s + 1 ): lowerCAmelCase_ = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): lowerCAmelCase_ = dp[i][j - 1] if arr[i - 1] <= j: lowerCAmelCase_ = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: lowerCAmelCase_ = s - 2 * j break return diff
22
import datasets lowerCamelCase__ = '''\ @InProceedings{conneau2018xnli, author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin", title = "XNLI: Evaluating Cross-lingual Sentence Representations", booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", year = "2018", publisher = "Association for Computational Linguistics", location = "Brussels, Belgium", } ''' lowerCamelCase__ = '''\ XNLI is a subset of a few thousand examples from MNLI which has been translated into a 14 different languages (some low-ish resource). As with MNLI, the goal is to predict textual entailment (does sentence A imply/contradict/neither sentence B) and is a classification task (given two sentences, predict one of three labels). ''' lowerCamelCase__ = ''' Computes XNLI score which is just simple accuracy. Args: predictions: Predicted labels. references: Ground truth labels. Returns: \'accuracy\': accuracy Examples: >>> predictions = [0, 1] >>> references = [0, 1] >>> xnli_metric = datasets.load_metric("xnli") >>> results = xnli_metric.compute(predictions=predictions, references=references) >>> print(results) {\'accuracy\': 1.0} ''' def A(__a: Dict , __a: Union[str, Any] ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): def __a ( self ) -> Tuple: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def __a ( self , _a , _a ) -> List[str]: return {"accuracy": simple_accuracy(_a , _a )}
22
1
import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowerCamelCase__ = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('''''', '''|''', '''|'''), datarow=DataRow('''''', '''|''', '''|'''), padding=1, with_header_hide=None, ) lowerCamelCase__ = [] lowerCamelCase__ = [] lowerCamelCase__ = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}} lowerCamelCase__ = [ { '''type''': '''header''', '''text''': { '''type''': '''plain_text''', '''text''': F'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''', '''emoji''': True, }, } ] lowerCamelCase__ = 0 for log in Path().glob('''*.log'''): lowerCamelCase__ = 0 with open(log, '''r''') as f: for line in f: lowerCamelCase__ = json.loads(line) if line.get('''nodeid''', '''''') != "": lowerCamelCase__ = line['''nodeid'''] if line.get('''duration''', None) is not None: lowerCamelCase__ = F'''{line['duration']:.4f}''' if line.get('''outcome''', '''''') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('''_''')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowerCamelCase__ = [] log.unlink() lowerCamelCase__ = '''''' lowerCamelCase__ = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" lowerCamelCase__ = [] lowerCamelCase__ = {} for test in failed_tests: lowerCamelCase__ = test[0].split('''::''') lowerCamelCase__ = data[0].split('''/''')[-1] if data[0] not in filesafailed: lowerCamelCase__ = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowerCamelCase__ = [test[0] for test in failed_table] lowerCamelCase__ = list(set(files)) # Count number of instances in failed_tests lowerCamelCase__ = [] for file in individual_files: table.append([file, len(filesafailed[file])]) lowerCamelCase__ = tabulate( table, headers=['''Test Location''', '''Num Failed'''], tablefmt=hf_table_format, stralign='''right''', ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 30_00: lowerCamelCase__ = '''Too many failed tests, please see the full report in the Action results.''' lowerCamelCase__ = len(err) + 10 lowerCamelCase__ = message[: 30_00 - offset] + F'''\n...\n```\n{err}''' print(F'''### {message}''') else: lowerCamelCase__ = '''No failed tests! 🤗''' print(F'''## {message}''') payload.append(no_error_payload) if os.environ.get('''TEST_TYPE''', '''''') != "": from slack_sdk import WebClient lowerCamelCase__ = WebClient(token=os.environ['''SLACK_API_TOKEN''']) if message != "No failed tests! 🤗": lowerCamelCase__ = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': message, }, } payload.append(md_report) lowerCamelCase__ = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': '''*For more details:*''', }, '''accessory''': { '''type''': '''button''', '''text''': { '''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True, }, '''url''': F'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } payload.append(action_button) lowerCamelCase__ = { '''type''': '''context''', '''elements''': [ { '''type''': '''plain_text''', '''text''': F'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''', } ], } payload.append(date_report) lowerCamelCase__ = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload) lowerCamelCase__ = response.data['''ts'''] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowerCamelCase__ = '''''' for i, row in enumerate(test_failures): if row[0] != test_class: lowerCamelCase__ = row[0] else: lowerCamelCase__ = '''''' lowerCamelCase__ = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''', }, } client.chat_postMessage( channel='''#accelerate-ci-daily''', thread_ts=ts, blocks=[payload], )
22
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCamelCase__ = '''bert-base-cased''' lowerCamelCase__ = '''google/pegasus-xsum''' lowerCamelCase__ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCamelCase__ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCamelCase__ = '''patrickvonplaten/t5-tiny-random''' lowerCamelCase__ = '''sshleifer/bart-tiny-random''' lowerCamelCase__ = '''sshleifer/tiny-mbart''' lowerCamelCase__ = '''sshleifer/tiny-marian-en-de''' def A(__a: Path , __a: list ): lowerCAmelCase_ = "\n".join(__a ) Path(__a ).open("w" ).writelines(__a ) def A(__a: str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , F"{split}.source" ) , __a ) _dump_articles(os.path.join(__a , F"{split}.target" ) , __a ) return tmp_dir class __magic_name__ (__lowercase ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , _a ) -> Dict: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase_ , lowerCAmelCase_ = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_a , _a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase_ = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , _a ) -> str: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in ARTICLES ) lowerCAmelCase_ = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES ) lowerCAmelCase_ = 4 lowerCAmelCase_ = LegacySeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , ) lowerCAmelCase_ = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase_ = tmp_dir.joinpath("train.source" ).open().readlines() lowerCAmelCase_ = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_a , _a , 128 , _a ) lowerCAmelCase_ = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase_ = {x.name for x in save_dir.iterdir()} lowerCAmelCase_ = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_a ) < len(_a ) assert len(_a ) == 1 assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> Any: if not FAIRSEQ_AVAILABLE: return lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=64 ) lowerCAmelCase_ = 64 lowerCAmelCase_ = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a ) lowerCAmelCase_ = [len(_a ) for x in batch_sampler] assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_a ) == len(_a ) # no dropped or added examples lowerCAmelCase_ = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = [] lowerCAmelCase_ = [] for batch in data_loader: lowerCAmelCase_ = batch["input_ids"].shape lowerCAmelCase_ = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase_ = np.product(batch["input_ids"].shape ) num_src_per_batch.append(_a ) if num_src_tokens > (max_tokens * 1.1): failures.append(_a ) assert num_src_per_batch[0] == max(_a ) if failures: raise AssertionError(f"too many tokens in {len(_a )} batches" ) def __a ( self ) -> List[str]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset(max_len=512 ) lowerCAmelCase_ = 2 lowerCAmelCase_ = ds.make_sortish_sampler(_a , shuffle=_a ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase_ = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a ) lowerCAmelCase_ = tokenizer.pad_token_id def count_pad_tokens(_a , _a="input_ids" ): return [batch[k].eq(_a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) ) assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) ) assert len(_a ) == len(_a ) def __a ( self , _a=1000 , _a=128 ) -> str: if os.getenv("USE_REAL_DATA" , _a ): lowerCAmelCase_ = "examples/seq2seq/wmt_en_ro" lowerCAmelCase_ = max_len * 2 * 64 if not Path(_a ).joinpath("train.len" ).exists(): save_len_file(_a , _a ) else: lowerCAmelCase_ = "examples/seq2seq/test_data/wmt_en_ro" lowerCAmelCase_ = max_len * 4 save_len_file(_a , _a ) lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a ) lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , ) return ds, max_tokens, tokenizer def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = self._get_dataset() lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) ) lowerCAmelCase_ = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) ) assert idsa.intersection(_a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , _a ) -> List[str]: lowerCAmelCase_ = AutoTokenizer.from_pretrained(_a , use_fast=_a ) if tok_name == MBART_TINY: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase_ = SeqaSeqDataset( _a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase_ = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
22
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bloom''' lowerCamelCase__ = ['''past_key_values'''] lowerCamelCase__ = { '''num_hidden_layers''': '''n_layer''', '''num_attention_heads''': '''n_head''', } def __init__( self , _a=250880 , _a=64 , _a=2 , _a=8 , _a=1E-5 , _a=0.0_2 , _a=True , _a=1 , _a=2 , _a=False , _a=0.0 , _a=0.0 , _a=1 , _a=False , **_a , ) -> Dict: lowerCAmelCase_ = vocab_size # Backward compatibility with n_embed kwarg lowerCAmelCase_ = kwargs.pop("n_embed" , _a ) lowerCAmelCase_ = hidden_size if n_embed is None else n_embed lowerCAmelCase_ = n_layer lowerCAmelCase_ = n_head lowerCAmelCase_ = layer_norm_epsilon lowerCAmelCase_ = initializer_range lowerCAmelCase_ = use_cache lowerCAmelCase_ = pretraining_tp lowerCAmelCase_ = apply_residual_connection_post_layernorm lowerCAmelCase_ = hidden_dropout lowerCAmelCase_ = attention_dropout lowerCAmelCase_ = bos_token_id lowerCAmelCase_ = eos_token_id lowerCAmelCase_ = slow_but_exact super().__init__(bos_token_id=_a , eos_token_id=_a , **_a ) class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.12''' ) def __init__( self , _a , _a = "default" , _a = None , _a = False , ) -> int: super().__init__(_a , task=_a , patching_specs=_a , use_past=_a ) if not getattr(self._config , "pad_token_id" , _a ): # TODO: how to do that better? lowerCAmelCase_ = 0 @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: lowerCAmelCase_ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(_a , direction="inputs" , inverted_values_shape=_a ) lowerCAmelCase_ = {0: "batch", 1: "past_sequence + sequence"} else: lowerCAmelCase_ = {0: "batch", 1: "sequence"} return common_inputs @property def __a ( self ) -> int: return self._config.n_layer @property def __a ( self ) -> int: return self._config.n_head @property def __a ( self ) -> float: return 1E-3 def __a ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ) -> Mapping[str, Any]: lowerCAmelCase_ = super(_a , self ).generate_dummy_inputs( _a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a ) # We need to order the input in the way they appears in the forward() lowerCAmelCase_ = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowerCAmelCase_ , lowerCAmelCase_ = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCAmelCase_ = seqlen + 2 lowerCAmelCase_ = self._config.hidden_size // self.num_attention_heads lowerCAmelCase_ = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowerCAmelCase_ = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowerCAmelCase_ = [ (torch.zeros(_a ), torch.zeros(_a )) for _ in range(self.num_layers ) ] lowerCAmelCase_ = common_inputs["attention_mask"] if self.use_past: lowerCAmelCase_ = ordered_inputs["attention_mask"].dtype lowerCAmelCase_ = torch.cat( [ordered_inputs["attention_mask"], torch.ones(_a , _a , dtype=_a )] , dim=1 ) return ordered_inputs @property def __a ( self ) -> int: return 13
22
def A(__a: Optional[Any] ): lowerCAmelCase_ = len(__a ) lowerCAmelCase_ = sum(__a ) lowerCAmelCase_ = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): lowerCAmelCase_ = True for i in range(1 , s + 1 ): lowerCAmelCase_ = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): lowerCAmelCase_ = dp[i][j - 1] if arr[i - 1] <= j: lowerCAmelCase_ = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: lowerCAmelCase_ = s - 2 * j break return diff
22
1
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class __magic_name__ (unittest.TestCase ): def __a ( self , _a ) -> Optional[Any]: for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ): lowerCAmelCase_ = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(_a ) def __a ( self ) -> Tuple: lowerCAmelCase_ = "sshleifer/tiny-gpt2" lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ) -> Union[str, Any]: lowerCAmelCase_ = "sgugger/tiny-distilbert-classification" lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ) -> List[str]: lowerCAmelCase_ = "sshleifer/tiny-gpt2" lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == "cpu" , "Cant do half precision" ) def __a ( self ) -> Tuple: lowerCAmelCase_ = "sshleifer/tiny-gpt2" lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ) -> List[str]: lowerCAmelCase_ = "sshleifer/tiny-gpt2" lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) # set architectures equal to `None` lowerCAmelCase_ = None lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a , configs=[config] ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = "sshleifer/tiny-gpt2" lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == "cpu" , "Can't do half precision" ) def __a ( self ) -> Optional[int]: lowerCAmelCase_ = "sshleifer/tiny-gpt2" lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = "sshleifer/tiny-gpt2" lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a , configs=[config] ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ) -> Dict: lowerCAmelCase_ = "sshleifer/tinier_bart" lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a , configs=[config] ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __a ( self ) -> List[Any]: lowerCAmelCase_ = "sshleifer/tiny-gpt2" lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a , configs=[config] ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __a ( self ) -> int: lowerCAmelCase_ = "sshleifer/tinier_bart" lowerCAmelCase_ = AutoConfig.from_pretrained(_a ) lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a , configs=[config] ) lowerCAmelCase_ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , "inf_time.csv" ) , train_memory_csv_file=os.path.join(_a , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(_a , "inf_mem.csv" ) , train_time_csv_file=os.path.join(_a , "train_time.csv" ) , env_info_csv_file=os.path.join(_a , "env.csv" ) , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a ) benchmark.run() self.assertTrue(Path(os.path.join(_a , "inf_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(_a , "train_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(_a , "inf_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(_a , "train_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(_a , "env.csv" ) ).exists() ) def __a ( self ) -> Optional[Any]: lowerCAmelCase_ = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(_a ): self.assertTrue(hasattr(_a , "sequential" ) ) self.assertTrue(hasattr(_a , "cumulative" ) ) self.assertTrue(hasattr(_a , "current" ) ) self.assertTrue(hasattr(_a , "total" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase_ = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , "log.txt" ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , ) lowerCAmelCase_ = PyTorchBenchmark(_a ) lowerCAmelCase_ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(_a , "log.txt" ) ).exists() )
22
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def A(__a: Any , __a: Union[str, Any] , __a: List[str] ): lowerCAmelCase_ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase_ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase_ = F"{src_lang}-{tgt_lang}" lowerCAmelCase_ = F"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n" os.makedirs(__a , exist_ok=__a ) lowerCAmelCase_ = os.path.join(__a , "README.md" ) print(F"Generating {path}" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(__a ) # make sure we are under the root of the project lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent lowerCamelCase__ = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split('''-''') lowerCamelCase__ = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
22
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCamelCase__ = { '''configuration_owlvit''': [ '''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OwlViTConfig''', '''OwlViTOnnxConfig''', '''OwlViTTextConfig''', '''OwlViTVisionConfig''', ], '''processing_owlvit''': ['''OwlViTProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''OwlViTFeatureExtractor'''] lowerCamelCase__ = ['''OwlViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OwlViTModel''', '''OwlViTPreTrainedModel''', '''OwlViTTextModel''', '''OwlViTVisionModel''', '''OwlViTForObjectDetection''', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
import re from filelock import FileLock try: import nltk lowerCamelCase__ = True except (ImportError, ModuleNotFoundError): lowerCamelCase__ = False if NLTK_AVAILABLE: with FileLock('''.lock''') as lock: nltk.download('''punkt''', quiet=True) def A(__a: str ): re.sub("<n>" , "" , __a ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__a ) )
22
1
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def A(__a: Union[str, Any] , __a: Tuple ): assert isinstance(__a , __a ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def A(__a: Any , __a: int , __a: int ): lowerCAmelCase_ = tmp_path / "cache" lowerCAmelCase_ = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase_ = TextDatasetReader(__a , cache_dir=__a , keep_in_memory=__a ).read() _check_text_dataset(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def A(__a: Optional[int] , __a: Tuple , __a: List[str] ): lowerCAmelCase_ = tmp_path / "cache" lowerCAmelCase_ = {"text": "string"} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = TextDatasetReader(__a , features=__a , cache_dir=__a ).read() _check_text_dataset(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def A(__a: Optional[Any] , __a: Tuple , __a: Optional[int] ): lowerCAmelCase_ = tmp_path / "cache" lowerCAmelCase_ = {"text": "string"} lowerCAmelCase_ = TextDatasetReader(__a , cache_dir=__a , split=__a ).read() _check_text_dataset(__a , __a ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def A(__a: Union[str, Any] , __a: List[Any] , __a: str ): if issubclass(__a , __a ): lowerCAmelCase_ = text_path elif issubclass(__a , __a ): lowerCAmelCase_ = [text_path] lowerCAmelCase_ = tmp_path / "cache" lowerCAmelCase_ = {"text": "string"} lowerCAmelCase_ = TextDatasetReader(__a , cache_dir=__a ).read() _check_text_dataset(__a , __a ) def A(__a: Dict , __a: List[str] , __a: Optional[Any]=("train",) ): assert isinstance(__a , __a ) for split in splits: lowerCAmelCase_ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def A(__a: Tuple , __a: str , __a: str ): lowerCAmelCase_ = tmp_path / "cache" lowerCAmelCase_ = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase_ = TextDatasetReader({"train": text_path} , cache_dir=__a , keep_in_memory=__a ).read() _check_text_datasetdict(__a , __a ) @pytest.mark.parametrize( "features" , [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ] , ) def A(__a: Any , __a: int , __a: Dict ): lowerCAmelCase_ = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" lowerCAmelCase_ = {"text": "string"} lowerCAmelCase_ = features.copy() if features else default_expected_features lowerCAmelCase_ = ( Features({feature: Value(__a ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase_ = TextDatasetReader({"train": text_path} , features=__a , cache_dir=__a ).read() _check_text_datasetdict(__a , __a ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def A(__a: Any , __a: Union[str, Any] , __a: Union[str, Any] ): if split: lowerCAmelCase_ = {split: text_path} else: lowerCAmelCase_ = "train" lowerCAmelCase_ = {"train": text_path, "test": text_path} lowerCAmelCase_ = tmp_path / "cache" lowerCAmelCase_ = {"text": "string"} lowerCAmelCase_ = TextDatasetReader(__a , cache_dir=__a ).read() _check_text_datasetdict(__a , __a , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
22
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase__ = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
22
1
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowerCamelCase__ = logging.get_logger(__name__) def A(__a: np.ndarray , __a: Union[int, Iterable[int]] , __a: bool , __a: int ): def constraint_to_multiple_of(__a: Union[str, Any] , __a: Dict , __a: List[str]=0 , __a: List[Any]=None ): lowerCAmelCase_ = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowerCAmelCase_ = math.floor(val / multiple ) * multiple if x < min_val: lowerCAmelCase_ = math.ceil(val / multiple ) * multiple return x lowerCAmelCase_ = (output_size, output_size) if isinstance(__a , __a ) else output_size lowerCAmelCase_ , lowerCAmelCase_ = get_image_size(__a ) lowerCAmelCase_ , lowerCAmelCase_ = output_size # determine new height and width lowerCAmelCase_ = output_height / input_height lowerCAmelCase_ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowerCAmelCase_ = scale_width else: # fit height lowerCAmelCase_ = scale_height lowerCAmelCase_ = constraint_to_multiple_of(scale_height * input_height , multiple=__a ) lowerCAmelCase_ = constraint_to_multiple_of(scale_width * input_width , multiple=__a ) return (new_height, new_width) class __magic_name__ (__lowercase ): lowerCamelCase__ = ['''pixel_values'''] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = False , _a = 1 , _a = True , _a = 1 / 255 , _a = True , _a = None , _a = None , **_a , ) -> None: super().__init__(**_a ) lowerCAmelCase_ = size if size is not None else {"height": 384, "width": 384} lowerCAmelCase_ = get_size_dict(_a ) lowerCAmelCase_ = do_resize lowerCAmelCase_ = size lowerCAmelCase_ = keep_aspect_ratio lowerCAmelCase_ = ensure_multiple_of lowerCAmelCase_ = resample lowerCAmelCase_ = do_rescale lowerCAmelCase_ = rescale_factor lowerCAmelCase_ = do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def __a ( self , _a , _a , _a = False , _a = 1 , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray: lowerCAmelCase_ = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" ) lowerCAmelCase_ = get_resize_output_image_size( _a , output_size=(size["height"], size["width"]) , keep_aspect_ratio=_a , multiple=_a , ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def __a ( self , _a , _a , _a = None , **_a , ) -> str: return rescale(_a , scale=_a , data_format=_a , **_a ) def __a ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def __a ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: lowerCAmelCase_ = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ = size if size is not None else self.size lowerCAmelCase_ = get_size_dict(_a ) lowerCAmelCase_ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowerCAmelCase_ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowerCAmelCase_ = resample if resample is not None else self.resample lowerCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ = image_std if image_std is not None else self.image_std lowerCAmelCase_ = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. lowerCAmelCase_ = [to_numpy_array(_a ) for image in images] if do_resize: lowerCAmelCase_ = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_rescale: lowerCAmelCase_ = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: lowerCAmelCase_ = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] lowerCAmelCase_ = [to_channel_dimension_format(_a , _a ) for image in images] lowerCAmelCase_ = {"pixel_values": images} return BatchFeature(data=_a , tensor_type=_a ) def __a ( self , _a , _a = None ) -> Dict: lowerCAmelCase_ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_a ) != len(_a ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(_a ): lowerCAmelCase_ = target_sizes.numpy() lowerCAmelCase_ = [] for idx in range(len(_a ) ): lowerCAmelCase_ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=_a ) lowerCAmelCase_ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_a ) else: lowerCAmelCase_ = logits.argmax(dim=1 ) lowerCAmelCase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
22
import logging from transformers import PretrainedConfig lowerCamelCase__ = logging.getLogger(__name__) lowerCamelCase__ = { '''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''', } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''bertabs''' def __init__( self , _a=30522 , _a=512 , _a=6 , _a=512 , _a=8 , _a=512 , _a=0.2 , _a=6 , _a=768 , _a=8 , _a=2048 , _a=0.2 , **_a , ) -> List[Any]: super().__init__(**_a ) lowerCAmelCase_ = vocab_size lowerCAmelCase_ = max_pos lowerCAmelCase_ = enc_layers lowerCAmelCase_ = enc_hidden_size lowerCAmelCase_ = enc_heads lowerCAmelCase_ = enc_ff_size lowerCAmelCase_ = enc_dropout lowerCAmelCase_ = dec_layers lowerCAmelCase_ = dec_hidden_size lowerCAmelCase_ = dec_heads lowerCAmelCase_ = dec_ff_size lowerCAmelCase_ = dec_dropout
22
1