code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Any = logging.get_logger(__name__) UpperCAmelCase__ : Optional[int] = {} class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :int = 'llama' snake_case__ :Any = ['past_key_values'] def __init__( self : Dict , __magic_name__ : List[Any]=32000 , __magic_name__ : Union[str, Any]=4096 , __magic_name__ : Tuple=11008 , __magic_name__ : Any=32 , __magic_name__ : int=32 , __magic_name__ : Dict=None , __magic_name__ : Dict="silu" , __magic_name__ : Any=2048 , __magic_name__ : str=0.02 , __magic_name__ : Union[str, Any]=1E-6 , __magic_name__ : int=True , __magic_name__ : Any=0 , __magic_name__ : Optional[int]=1 , __magic_name__ : List[str]=2 , __magic_name__ : Any=1 , __magic_name__ : int=False , __magic_name__ : List[Any]=None , **__magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = vocab_size lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = hidden_size lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads # for backward compatibility if num_key_value_heads is None: lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = num_key_value_heads lowerCAmelCase__ = hidden_act lowerCAmelCase__ = initializer_range lowerCAmelCase__ = rms_norm_eps lowerCAmelCase__ = pretraining_tp lowerCAmelCase__ = use_cache lowerCAmelCase__ = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , tie_word_embeddings=__magic_name__ , **__magic_name__ , ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __magic_name__ ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f"""got {self.rope_scaling}""" ) 
lowerCAmelCase__ = self.rope_scaling.get("type" , __magic_name__ ) lowerCAmelCase__ = self.rope_scaling.get("factor" , __magic_name__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(__magic_name__ , __magic_name__ ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
48
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class A : def __init__( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = {} def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = {} def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ): """simple docstring""" if nodea not in self.connections: self.add_node(__magic_name__ ) if nodea not in self.connections: self.add_node(__magic_name__ ) lowerCAmelCase__ = probability def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" return list(self.connections ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]: '''simple docstring''' lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = Counter(graph.get_nodes() ) lowerCAmelCase__ = start for _ in range(UpperCamelCase_ ): lowerCAmelCase__ = graph.transition(UpperCamelCase_ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
48
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: UpperCAmelCase__ : Optional[Any] = None UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"} UpperCAmelCase__ : Dict = { "vocab_file": { "facebook/mbart-large-en-ro": ( "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model" ), "facebook/mbart-large-cc25": ( "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model" ), }, "tokenizer_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json", }, } UpperCAmelCase__ : int = { "facebook/mbart-large-en-ro": 10_24, "facebook/mbart-large-cc25": 10_24, } # fmt: off UpperCAmelCase__ : str = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Optional[int] = VOCAB_FILES_NAMES snake_case__ :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP snake_case__ :Optional[int] = ['input_ids', 'attention_mask'] snake_case__ :List[str] = MBartTokenizer snake_case__ :List[int] = [] snake_case__ :List[int] = [] def __init__( self : Tuple , __magic_name__ : Optional[Any]=None , __magic_name__ : Dict=None , 
__magic_name__ : Optional[int]="<s>" , __magic_name__ : str="</s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Dict="<s>" , __magic_name__ : int="<unk>" , __magic_name__ : Tuple="<pad>" , __magic_name__ : str="<mask>" , __magic_name__ : str=None , __magic_name__ : Tuple=None , __magic_name__ : Optional[int]=None , **__magic_name__ : Dict , ): """simple docstring""" lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token super().__init__( vocab_file=__magic_name__ , tokenizer_file=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , src_lang=__magic_name__ , tgt_lang=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , ) lowerCAmelCase__ = vocab_file lowerCAmelCase__ = False if not self.vocab_file else True lowerCAmelCase__ = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) lowerCAmelCase__ = { lang_code: self.convert_tokens_to_ids(__magic_name__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX" lowerCAmelCase__ = self.convert_tokens_to_ids(self._src_lang ) lowerCAmelCase__ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" return self._src_lang @src_lang.setter def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ): """simple docstring""" lowerCAmelCase__ = [self.sep_token_id] lowerCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) lowerCAmelCase__ = src_lang lowerCAmelCase__ = self(__magic_name__ , 
add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ ) lowerCAmelCase__ = tgt_lang_id return inputs def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Tuple , ): """simple docstring""" lowerCAmelCase__ = src_lang lowerCAmelCase__ = tgt_lang return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Any ): """simple docstring""" lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ ) lowerCAmelCase__ = [] lowerCAmelCase__ = [self.eos_token_id, self.cur_lang_code] lowerCAmelCase__ = self.convert_ids_to_tokens(self.prefix_tokens ) lowerCAmelCase__ = self.convert_ids_to_tokens(self.suffix_tokens ) lowerCAmelCase__ = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ ) lowerCAmelCase__ = [] lowerCAmelCase__ = [self.eos_token_id, self.cur_lang_code] lowerCAmelCase__ = self.convert_ids_to_tokens(self.prefix_tokens ) lowerCAmelCase__ = self.convert_ids_to_tokens(self.suffix_tokens ) lowerCAmelCase__ = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] 
+ suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[str] = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(__magic_name__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return lowerCAmelCase__ = os.path.join( __magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ): copyfile(self.vocab_file , __magic_name__ ) return (out_vocab_file,)
48
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration UpperCAmelCase__ : Optional[Any] = pytest.mark.integration UpperCAmelCase__ : str = {"comet"} UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None UpperCAmelCase__ : Optional[int] = {"code_eval"} UpperCAmelCase__ : List[Any] = os.name == "nt" UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"} UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' @wraps(UpperCamelCase_ ) def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("\"test requires Fairseq\"" ) else: test_case(self , UpperCamelCase_ ) return wrapper def A ( UpperCamelCase_ : List[Any] ) -> str: '''simple docstring''' @wraps(UpperCamelCase_ ) def wrapper(self : Optional[int] , UpperCamelCase_ : int ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("\"test requires transformers\"" ) else: test_case(self , UpperCamelCase_ ) return wrapper def A ( UpperCamelCase_ : Any ) -> int: '''simple docstring''' @wraps(UpperCamelCase_ ) def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("\"test not supported on Windows\"" ) else: test_case(self , UpperCamelCase_ ) return wrapper def A ( ) -> Tuple: '''simple docstring''' lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )] return [{"testcase_name": x, "metric_name": x} 
for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @local class A ( parameterized.TestCase ): snake_case__ :Union[str, Any] = {} snake_case__ :Optional[Any] = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = "[...]" lowerCAmelCase__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path ) lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ ) # check parameters lowerCAmelCase__ = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ): with self.use_local_metrics(): try: lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ): """simple docstring""" lowerCAmelCase__ = "[...]" lowerCAmelCase__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path ) # run doctest with self.use_local_metrics(): lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def 
__SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ): """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ): yield else: yield @contextmanager def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ): return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ ) with patch("datasets.load_metric" ) as mock_load_metric: lowerCAmelCase__ = load_local_metric yield @classmethod def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ): """simple docstring""" def wrapper(__magic_name__ : Dict ): lowerCAmelCase__ = contextmanager(__magic_name__ ) lowerCAmelCase__ = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("bleurt" ) def A ( UpperCamelCase_ : str ) -> Any: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags class A ( SCREAMING_SNAKE_CASE__ ): def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ): """simple docstring""" assert len(input_dict["input_ids"] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor" ) as mock_create_predictor: lowerCAmelCase__ = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore" ) def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]: '''simple docstring''' import torch def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) ) # mock get_model which is supposed to do download 
a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model" ), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: lowerCAmelCase__ = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet" ) def A ( UpperCamelCase_ : Optional[int] ) -> Any: '''simple docstring''' def load_from_checkpoint(UpperCamelCase_ : Tuple ): class A : def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ): """simple docstring""" assert len(__magic_name__ ) == 2 lowerCAmelCase__ = [0.19, 0.92] return scores, sum(__magic_name__ ) / len(__magic_name__ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model" ) as mock_download_model: lowerCAmelCase__ = None with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint: lowerCAmelCase__ = load_from_checkpoint yield def A ( ) -> Tuple: '''simple docstring''' lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) ) lowerCAmelCase__ = "ERROR" lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ): metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
48
1
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :List[Any] = DiTPipeline snake_case__ :List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS snake_case__ :List[Any] = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } snake_case__ :Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS snake_case__ :Dict = False def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase__ = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__magic_name__ , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=__magic_name__ , ) lowerCAmelCase__ = AutoencoderKL() lowerCAmelCase__ = DDIMScheduler() lowerCAmelCase__ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=0 ): """simple docstring""" if str(__magic_name__ ).startswith("mps" ): lowerCAmelCase__ = torch.manual_seed(__magic_name__ ) else: lowerCAmelCase__ = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) lowerCAmelCase__ = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, 
"output_type": "numpy", } return inputs def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = "cpu" lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = self.pipeline_class(**__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) lowerCAmelCase__ = self.get_dummy_inputs(__magic_name__ ) lowerCAmelCase__ = pipe(**__magic_name__ ).images lowerCAmelCase__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowerCAmelCase__ = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) lowerCAmelCase__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__magic_name__ , 1E-3 ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" self._test_inference_batch_single_identical(relax_max_difference=__magic_name__ , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class A ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" ) pipe.to("cuda" ) lowerCAmelCase__ = ["vase", "umbrella", "white shark", "white wolf"] lowerCAmelCase__ = pipe.get_label_ids(__magic_name__ ) lowerCAmelCase__ = pipe(__magic_name__ , generator=__magic_name__ , num_inference_steps=40 , output_type="np" ).images for word, image in zip(__magic_name__ , __magic_name__ ): lowerCAmelCase__ = load_numpy( 
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-2 def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" ) lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("cuda" ) lowerCAmelCase__ = ["vase", "umbrella"] lowerCAmelCase__ = pipe.get_label_ids(__magic_name__ ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe(__magic_name__ , generator=__magic_name__ , num_inference_steps=25 , output_type="np" ).images for word, image in zip(__magic_name__ , __magic_name__ ): lowerCAmelCase__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" f"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1E-1
48
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase__ : int = { "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab", "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn", "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi", "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab", "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab", "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng", "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn", "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl", "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn", "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab", "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn", "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn", "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab", "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn", "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", 
"Dzongkha": "dzo_Tibt", "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn", "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn", "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn", "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn", "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr", "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva", "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn", "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn", "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan", "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda", "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor", "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl", "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn", "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn", "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo", "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn", "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn", "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", "Ganda": "lug_Latn", "Luo": "luo_Latn", "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva", "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva", "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl", "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng", "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": 
"mya_Mymr", "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn", "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn", "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya", "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn", "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn", "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn", "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn", "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr", "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn", "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn", "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn", "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn", "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl", "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai", "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng", "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn", "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn", "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl", "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn", "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn", "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn", "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant", "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn", } class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Tuple = 
'facebook/nllb-200-distilled-600M' snake_case__ :Optional[Any] = ( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) snake_case__ :List[Any] = 'translator' snake_case__ :List[Any] = AutoTokenizer snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM snake_case__ :List[str] = LANGUAGE_CODES snake_case__ :List[Any] = ['text', 'text', 'text'] snake_case__ :List[Any] = ['text'] def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ): """simple docstring""" if src_lang not in self.lang_to_code: raise ValueError(f"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(f"""{tgt_lang} is not a supported language.""" ) lowerCAmelCase__ = self.lang_to_code[src_lang] lowerCAmelCase__ = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ): """simple docstring""" return self.model.generate(**__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ): """simple docstring""" return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
48
1
"""Convert GroupViT checkpoints from the original repository to the HF format."""
import argparse

import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel


def rename_key(name):
    """Map one parameter name from the original GroupViT checkpoint to the HF naming scheme."""
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys of `orig_state_dict` in place, splitting fused qkv/in_proj matrices.

    NOTE(review): the obfuscated original dropped the dict-store targets for the
    split q/k/v slices; the key templates below are reconstructed from the
    `rename_key` mapping (stages/layers/self_attn) and the CLIP-style
    q_proj/k_proj/v_proj naming — confirm against a converted checkpoint.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn."
            if "weight" in key:
                orig_state_dict[prefix + "q_proj.weight"] = val[:dim, :]
                orig_state_dict[prefix + "k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "q_proj.bias"] = val[:dim]
                orig_state_dict[prefix + "k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn."
            if "weight" in key:
                orig_state_dict[prefix + "q_proj.weight"] = val[:dim, :]
                orig_state_dict[prefix + "k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "q_proj.bias"] = val[:dim]
                orig_state_dict[prefix + "k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict


def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True is required so the raw byte stream can be handed to PIL
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """Load an original GroupViT checkpoint, convert it, verify the logits and save it.

    Args:
        checkpoint_path: path to the original checkpoint (.pth with a "model" entry).
        pytorch_dump_folder_path: where to save the converted processor and model.
        model_name: one of "groupvit-gcc-yfcc" or "groupvit-gcc-redcaps".
        push_to_hub: if True, also push both to the Hub under the `nielsr` org.
    """
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    # strict=False: the asserts below pin exactly which keys may be missing/unexpected
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"""Model name {model_name} not supported.""")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        # fixed: the old default "groupvit-gccy-fcc" always failed the name check above
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
48
"""TimmBackbone model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class A(PretrainedConfig):
    """Configuration for a backbone wrapped from the `timm` library.

    Fixes applied to the obfuscated original: the base class was the undefined
    placeholder `SCREAMING_SNAKE_CASE__` (restored to the imported
    `PretrainedConfig`), and all `self.*` assignments had collapsed onto one
    local variable; attribute names are recovered from the right-hand-side
    parameter references.

    Args:
        backbone: name of the timm backbone to use.
        num_channels: number of input channels (default 3).
        features_only: whether to output only the backbone features.
        use_pretrained_backbone: whether to load pretrained timm weights.
        out_indices: indices of the feature maps to return; defaults to the
            last stage `(-1,)` when not given.
    """

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # the original assigned a bare `True`; presumably this marks the config
        # as timm-backed — TODO confirm the attribute name against callers
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
48
1
"""Download CI artifacts for a GitHub Actions workflow run and summarize the test errors."""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter

import requests


def get_job_links(workflow_run_id, token=None):
    """Return {job name: html url} for all jobs of a workflow run (paginated, 100 per page)."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}

    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"""&page={i + 2}""", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Return {artifact name: archive download url} for all artifacts of a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}

    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"""
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"""&page={i + 2}""", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download one artifact zip into `output_dir` as `{artifact_name}.zip`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}

    # allow_redirects=False so the signed download URL can be read from the Location header
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract [error line, error, failed test, job link] entries from one artifact zip."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"""`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` """
            f"""and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Collect error entries from every `*.zip` artifact in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors


def reduce_by_error(logs, error_filter=None):
    """Group log entries by error message, sorted by descending occurrence count."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Return the model name for a `tests/models/...` test id, or None for non-model tests."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None

    return test


def reduce_by_model(logs, error_filter=None):
    """Group log entries by model, with per-model error counts, sorted by total errors."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    """Render the per-error summary as a GitHub-flavored Markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"""| {count} | {error[:100]} | |"""
        lines.append(line)

    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    """Render the per-model summary as a GitHub-flavored Markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"""| {model} | {count} | {error[:60]} | {_count} |"""
        lines.append(line)

    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
48
"""Image-captioning agent tool backed by BLIP."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class A(PipelineTool):
    """Tool that generates an English caption for an input image.

    Fixes applied to the obfuscated original: the base class was the undefined
    placeholder `SCREAMING_SNAKE_CASE__` (restored to the imported
    `PipelineTool`); the class attributes and the encode/forward/decode
    methods had collapsed onto single names, so only the last binding of each
    survived; and `skip_special_tokens` was passed the outputs tensor instead
    of `True`.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # PIL is needed at runtime to handle the image input
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the input image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Generate caption token ids."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the first generated caption to plain text."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
48
1
"""Utilities for interacting with the Hugging Face Hub (user agent, repo names, model cards)."""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
# fixed: the obfuscated original imported the non-existent `uuida` from uuid
from uuid import uuid4

from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
    EntryNotFoundError,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    is_jinja_available,
)
from packaging import version
from requests import HTTPError

from .. import __version__
from .constants import (
    DEPRECATED_REVISION_ARGS,
    DIFFUSERS_CACHE,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
)
from .import_utils import (
    ENV_VARS_TRUE_VALUES,
    _flax_version,
    _jax_version,
    _onnxruntime_version,
    _torch_version,
    is_flax_available,
    is_onnx_available,
    is_torch_available,
)
from .logging import get_logger


logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"


def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with library versions and optional extra fields."""
    ua = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"""; torch/{_torch_version}"""
    if is_flax_available():
        ua += f"""; jax/{_jax_version}"""
        ua += f"""; flax/{_flax_version}"""
    if is_onnx_available():
        ua += f"""; onnxruntime/{_onnxruntime_version}"""
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua


def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Return `<namespace>/<model_id>`, resolving the namespace from the token when not given."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"""{username}/{model_id}"""
    else:
        return f"""{organization}/{model_id}"""


def create_model_card(args, model_name):
    """Render the model-card template with the training arguments and save it as README.md.

    Only runs on the main process (`args.local_rank` in [-1, 0]); requires Jinja.
    """
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        # fixed: the original passed the duplicate keyword `adam_betaa` twice (SyntaxError);
        # the hasattr strings identify the intended attributes
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)


def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a cached-snapshot file path, if any."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None


# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")


def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    """Move cached blobs from the old diffusers cache to the new one, leaving symlinks behind."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                # leave a symlink in the old location pointing at the moved blob
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )


def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Insert `variant` before the file extension, e.g. model.bin -> model.fp16.bin."""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name


def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    """Resolve a model weights file from a local path/directory or download it from the Hub.

    Returns the local path of the weights file; raises EnvironmentError with a
    user-friendly message for the various Hub failure modes.
    """
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."""
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.""",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
                "this model name. Check the model page at "
                f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."""
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."""
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"""
            )
        except ValueError:
            raise EnvironmentError(
                f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
                f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
                f""" directory containing a file named {weights_name} or"""
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
                f"""containing a file named {weights_name}"""
            )
48
'''simple docstring'''
# NOTE(review): this chunk is a machine-mangled copy of an MBart-50
# SentencePiece tokenizer module. Assignment targets were collapsed to the
# single name `lowerCAmelCase__` and parameters to `__magic_name__`
# (duplicated in signatures, which is a SyntaxError), so attributes read
# later (e.g. `self.sp_model`, `self.fairseq_offset`) refer to assignments
# whose targets were destroyed. Comments below describe the evident intent;
# confirm details against the upstream `MBart50Tokenizer` before relying on them.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging

UpperCAmelCase__ : Tuple = logging.get_logger(__name__)

# NOTE(review): every module "constant" below rebinds the same mangled name
# `UpperCAmelCase__`; only the last binding survives at runtime.
# SentencePiece word-boundary marker.
UpperCAmelCase__ : Union[str, Any] = "▁"

# Expected vocabulary file name.
UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}

# Remote URL of the pretrained SentencePiece model.
UpperCAmelCase__ : Union[str, Any] = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

# Maximum model input size (positional embeddings).
UpperCAmelCase__ : Optional[Any] = {
    "facebook/mbart-large-50-one-to-many-mmt": 10_24,
}

# The 52 mBART-50 language codes; presumably bound as FAIRSEQ_LANGUAGE_CODES
# before mangling — the __init__ below reads that name.
# fmt: off
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]


class A ( SCREAMING_SNAKE_CASE__ ):
    """MBart-50 tokenizer backed by SentencePiece.

    NOTE(review): the base-class name was mangled; presumably
    `PreTrainedTokenizer` (imported above) — confirm.
    """

    # Class attributes wiring in the module-level constants above.
    snake_case__ :Optional[int] = VOCAB_FILES_NAMES
    snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ :Tuple = ['input_ids', 'attention_mask']
    # prefix_tokens / suffix_tokens placeholders (set per language later).
    snake_case__ :List[int] = []
    snake_case__ :List[int] = []

    def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ):
        """Load the SentencePiece model and build the fairseq-aligned vocab.

        NOTE(review): duplicate parameter names above are a SyntaxError from
        the mangling; the defaults suggest the original order was
        (vocab_file, src_lang, tgt_lang, eos, sep, cls, unk, pad, mask,
        sp_model_kwargs, **kwargs) — confirm against upstream.
        """
        # Mask token behaves like a normal word, i.e. include the space before it.
        lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
        lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
        lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
        # Ensure every language code is registered as an additional special token.
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
        lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__magic_name__ ) )
        lowerCAmelCase__ = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = len(self.sp_model )
        # Language codes are appended after the SentencePiece vocabulary.
        lowerCAmelCase__ = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ )
        }
        lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
        # Mask token id sits after the vocab plus all language codes.
        lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX"
        lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
        lowerCAmelCase__ = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def __SCREAMING_SNAKE_CASE ( self : int ):
        """Total vocabulary size: SP vocab + language codes + offset + mask."""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def __SCREAMING_SNAKE_CASE ( self : Dict ):
        """Current source-language code (e.g. "en_XX")."""
        return self._src_lang

    @src_lang.setter
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
        """Change the source language and refresh the special tokens."""
        lowerCAmelCase__ = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __getstate__( self : Dict ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        lowerCAmelCase__ = self.__dict__.copy()
        lowerCAmelCase__ = None
        return state

    def __setstate__( self : List[Any] , __magic_name__ : Dict ):
        """Restore state and reload the SentencePiece model from disk."""
        lowerCAmelCase__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCAmelCase__ = {}
        lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Return the token -> id vocabulary, including added tokens."""
        lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ):
        """Tokenize a string with the SentencePiece model."""
        return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
        """Convert a token to its id, honoring the fairseq alignment."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ):
        """Convert an id back to its token, honoring the fairseq alignment."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ):
        """Join tokens into a string, decoding non-special runs via SentencePiece."""
        lowerCAmelCase__ = []
        lowerCAmelCase__ = ""
        lowerCAmelCase__ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__magic_name__ ) + token
                lowerCAmelCase__ = True
                lowerCAmelCase__ = []
            else:
                current_sub_tokens.append(__magic_name__ )
                lowerCAmelCase__ = False
        out_string += self.sp_model.decode(__magic_name__ )
        return out_string.strip()

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
        """Save the SentencePiece vocabulary file into *save_directory*."""
        if not os.path.isdir(__magic_name__ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase__ = os.path.join(
            __magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        # Copy the existing model file, or serialize the in-memory model.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __magic_name__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(__magic_name__ , "wb" ) as fi:
                lowerCAmelCase__ = self.sp_model.serialized_model_proto()
                fi.write(__magic_name__ )
        return (out_vocab_file,)

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
        """Return a 0/1 mask marking special tokens (1) vs sequence tokens (0).

        NOTE(review): the duplicated keyword `token_ids_a=` in the super()
        call below is a SyntaxError introduced by the mangling.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
        lowerCAmelCase__ = [1] * len(self.prefix_tokens )
        lowerCAmelCase__ = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
        return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
        """Wrap token ids with the language prefix and EOS suffix tokens."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ):
        """Encode inputs for translation, attaching the target-language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        lowerCAmelCase__ = src_lang
        lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
        lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ )
        lowerCAmelCase__ = tgt_lang_id
        return inputs

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Union[str, Any] , ):
        """Prepare a seq2seq batch with the given source/target languages."""
        lowerCAmelCase__ = src_lang
        lowerCAmelCase__ = tgt_lang
        return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : str ):
        """Switch special tokens to source-language mode."""
        return self.set_src_lang_special_tokens(self.src_lang )

    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Switch special tokens to target-language mode."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
        """Set prefix=[src_lang_code] and suffix=[eos] (mBART-50 convention)."""
        lowerCAmelCase__ = self.lang_code_to_id[src_lang]
        lowerCAmelCase__ = [self.cur_lang_code_id]
        lowerCAmelCase__ = [self.eos_token_id]

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
        """Set prefix=[tgt_lang_code] and suffix=[eos] (mBART-50 convention)."""
        lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
        lowerCAmelCase__ = [self.cur_lang_code_id]
        lowerCAmelCase__ = [self.eos_token_id]
48
1
'''simple docstring'''
# Viterbi algorithm for hidden Markov models.
#
# Fix(review): in the mangled original every function was defined under the
# same name `A` (each def shadowing the previous) while call sites referenced
# the real snake_case helpers, and every assignment target had been collapsed
# to `lowerCAmelCase__` — the module could not run. The names and assignment
# targets below are reconstructed from the call sites and key reads visible
# in the original code.
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely hidden-state sequence for the observations.

    Args:
        observations_space: ordered list of observation names.
        states_space: list of hidden-state names.
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> {state -> P(transition)}.
        emission_probabilities: state -> {observation -> P(emission)}.

    Raises:
        ValueError: if any argument is empty or of the wrong shape/type.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Run all structural checks on the viterbi arguments."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any argument is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Check both sequence arguments are lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Raise ValueError unless *_object* is a list of strings."""
    if not isinstance(_object, list):
        msg = f"""{var_name} must be a list"""
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"""{var_name} must be a list of strings"""
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Check the three probability tables have the expected shapes."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check *_object* is a dict of dicts with string keys and float values."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """Raise ValueError unless *_object* is a dict[str, value_type]."""
    if not isinstance(_object, dict):
        msg = f"""{var_name} must be a dict"""
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"""{var_name} all keys must be strings"""
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
48
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase__ = 0 if start < end: lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = a[end] lowerCAmelCase__ = a[pivot] lowerCAmelCase__ = temp lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 ) count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ ) return count def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict: '''simple docstring''' lowerCAmelCase__ = 0 lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = a[end] lowerCAmelCase__ = a[pivot] lowerCAmelCase__ = temp lowerCAmelCase__ = start - 1 for index in range(UpperCamelCase_ , UpperCamelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ = new_pivot_index + 1 lowerCAmelCase__ = a[new_pivot_index] lowerCAmelCase__ = a[index] lowerCAmelCase__ = temp lowerCAmelCase__ = a[new_pivot_index + 1] lowerCAmelCase__ = a[end] lowerCAmelCase__ = temp return new_pivot_index + 1, count UpperCAmelCase__ : Tuple = TemporaryFile() UpperCAmelCase__ : List[str] = 1_00 # 1000 elements are to be sorted UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p) np.save(outfile, X) print("The array is") print(X) outfile.seek(0) # using the same array UpperCAmelCase__ : Optional[Any] = np.load(outfile) UpperCAmelCase__ : Any = len(M) - 1 UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution" "is :" ) print(z)
48
1
'''simple docstring'''
# Dataset reader that builds a `datasets` Dataset from a Spark DataFrame.
#
# Fix(review): the mangled original repeated the parameter name
# `__magic_name__` throughout `__init__` (a SyntaxError) and collapsed every
# attribute assignment to `lowerCAmelCase__`, so the attributes read in
# `read()` (`self._load_from_cache_file`, `self._file_format`,
# `self.builder`) were never set. Parameter names and the base class are
# reconstructed from the visible type hints, defaults, imports, and the
# attribute reads — confirm against the upstream `datasets` SparkDatasetReader.
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class A(AbstractDatasetReader):
    """Read a :class:`pyspark.sql.DataFrame` as a dataset via the Spark builder."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        """Store reader options and construct the Spark dataset builder.

        Args:
            df: source Spark DataFrame.
            split: dataset split to expose.
            features: optional explicit feature schema.
            streaming: if True, ``read()`` returns a streaming dataset.
            cache_dir: cache directory for prepared data.
            keep_in_memory: whether to keep the dataset in memory.
            working_dir: Spark working directory for intermediate files.
            load_from_cache_file: if False, force re-preparation of the data.
            file_format: on-disk format used when preparing ("arrow" by default).
        """
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming or fully prepared)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a re-download/re-preparation when the cache must be bypassed.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
48
'''simple docstring'''
# NOTE(review): this chunk is a machine-mangled copy of a GroupViT checkpoint
# conversion script. Every assignment target was collapsed to
# `lowerCAmelCase__` and parameters to `UpperCamelCase_` (duplicated in
# `convert_state_dict`'s signature, which is a SyntaxError), so later reads
# such as `name`, `key_split`, `val`, `model`, `processor` refer to bindings
# whose targets were destroyed. Comments describe the evident intent.
import argparse

import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel


def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
    '''Map one original GroupViT checkpoint key to its transformers name.

    Applies ordered string substitutions for the vision encoder, text
    encoder, and projection heads. Rule order matters: later guards like
    `"pre_assign" not in name` depend on earlier renames not having fired.
    '''
    # --- vision encoder ---
    if "img_encoder.pos_embed" in name:
        lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
    if "img_encoder.patch_embed.proj" in name:
        lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
    if "img_encoder.patch_embed.norm" in name:
        lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
    if "img_encoder.layers" in name:
        lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
    if "blocks" in name and "res" not in name:
        lowerCAmelCase__ = name.replace("blocks" , "layers" )
    if "attn" in name and "pre_assign" not in name:
        lowerCAmelCase__ = name.replace("attn" , "self_attn" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        lowerCAmelCase__ = name.replace("proj" , "out_proj" )
    if "pre_assign_attn.attn.proj" in name:
        lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
    if "norm1" in name:
        lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
    if "norm2" in name and "pre_assign" not in name:
        lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
    if "img_encoder.norm" in name:
        lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
    if "text_encoder.positional_embedding" in name:
        lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
    if "text_encoder.transformer.resblocks." in name:
        lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
    if "ln_1" in name:
        lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
    if "ln_2" in name:
        lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
    if "c_fc" in name:
        lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
    if "c_proj" in name:
        lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
    if "text_encoder" in name:
        lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
    if "ln_final" in name:
        lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
    if "img_projector.linear_out." in name:
        lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
    if "text_projector.linear_hidden" in name:
        lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
    if "text_projector.linear_out" in name:
        lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
    return name


def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]:
    '''Convert a full original state dict to transformers naming.

    Fused qkv / in_proj weights are split into separate q, k, v tensors;
    everything else is renamed via the key-mapping function above.
    NOTE(review): the duplicated parameter name above is a SyntaxError from
    the mangling; presumably (orig_state_dict, config).
    '''
    for key in orig_state_dict.copy().keys():
        lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            lowerCAmelCase__ = key.split("." )
            lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
            lowerCAmelCase__ = config.vision_config.hidden_size
            if "weight" in key:
                lowerCAmelCase__ = val[:dim, :]
                lowerCAmelCase__ = val[dim : dim * 2, :]
                lowerCAmelCase__ = val[-dim:, :]
            else:
                lowerCAmelCase__ = val[:dim]
                lowerCAmelCase__ = val[dim : dim * 2]
                lowerCAmelCase__ = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            lowerCAmelCase__ = key.split("." )
            lowerCAmelCase__ = int(key_split[3] )
            lowerCAmelCase__ = config.text_config.hidden_size
            if "weight" in key:
                lowerCAmelCase__ = val[:dim, :]
                lowerCAmelCase__ = val[ dim : dim * 2, : ]
                lowerCAmelCase__ = val[-dim:, :]
            else:
                lowerCAmelCase__ = val[:dim]
                lowerCAmelCase__ = val[dim : dim * 2]
                lowerCAmelCase__ = val[-dim:]
        else:
            lowerCAmelCase__ = rename_key(UpperCamelCase_ )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                lowerCAmelCase__ = val.squeeze_()
            else:
                lowerCAmelCase__ = val
    return orig_state_dict


def A ( ) -> Optional[int]:
    '''Download and return the standard COCO cats test image.'''
    lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
    return im


@torch.no_grad()
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any:
    '''Convert, verify and save a GroupViT checkpoint.

    Loads the original checkpoint, converts its state dict, checks the
    model's logits on a reference image against hard-coded expected values
    per model name, then saves (and optionally pushes) model + processor.
    '''
    lowerCAmelCase__ = GroupViTConfig()
    lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval()
    lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
    lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
    lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
    # Only the position-ids buffer may be missing; only the multi-label
    # logit scale may be unexpected.
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0)
    # verify result
    lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    lowerCAmelCase__ = prepare_img()
    lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
    with torch.no_grad():
        lowerCAmelCase__ = model(**UpperCamelCase_ )
    if model_name == "groupvit-gcc-yfcc":
        lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] )
    elif model_name == "groupvit-gcc-redcaps":
        lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] )
    else:
        raise ValueError(F"""Model name {model_name} not supported.""" )
    assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )

    processor.save_pretrained(UpperCamelCase_ )
    model.save_pretrained(UpperCamelCase_ )
    print("Successfully saved processor and model to" , UpperCamelCase_ )

    if push_to_hub:
        print("Pushing to the hub..." )
        processor.push_to_hub(UpperCamelCase_ , organization="nielsr" )
        model.push_to_hub(UpperCamelCase_ , organization="nielsr" )


if __name__ == "__main__":
    UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gccy-fcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    UpperCAmelCase__ : Any = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
48
1
'''simple docstring'''
# NOTE(review): this chunk is a machine-mangled copy of an audio diffusion
# pipeline. Assignment targets were collapsed to `lowerCAmelCase__` and
# parameters to `__magic_name__` (duplicated in signatures — a SyntaxError),
# so names read later (`steps`, `noise`, `images`, `mask`, ...) refer to
# bindings whose targets were destroyed. Comments describe the evident intent.
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class A ( SCREAMING_SNAKE_CASE__ ):
    """Audio diffusion pipeline: generates audio by denoising mel spectrogram
    images, optionally through a VQ-VAE latent space.

    NOTE(review): the base-class name was mangled; presumably
    `DiffusionPipeline` (imported above) — confirm.
    """

    # Components that are allowed to be None (VQ-VAE is optional).
    snake_case__ :List[str] = ['vqvae']

    def __init__( self : Optional[Any] , __magic_name__ : AutoencoderKL , __magic_name__ : UNetaDConditionModel , __magic_name__ : Mel , __magic_name__ : Union[DDIMScheduler, DDPMScheduler] , ):
        """Register the UNet, scheduler, mel converter and optional VQ-VAE."""
        super().__init__()
        self.register_modules(unet=__magic_name__ , scheduler=__magic_name__ , mel=__magic_name__ , vqvae=__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Any ):
        """Default number of inference steps: 50 for DDIM, 1000 otherwise."""
        return 50 if isinstance(self.scheduler , __magic_name__ ) else 1000

    @torch.no_grad()
    def __call__( self : Union[str, Any] , __magic_name__ : int = 1 , __magic_name__ : str = None , __magic_name__ : np.ndarray = None , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = None , __magic_name__ : torch.Generator = None , __magic_name__ : float = 0 , __magic_name__ : float = 0 , __magic_name__ : torch.Generator = None , __magic_name__ : float = 0 , __magic_name__ : torch.Tensor = None , __magic_name__ : torch.Tensor = None , __magic_name__ : Dict=True , ):
        """Run the denoising loop and return generated images and audio.

        Optionally conditions on an input audio file/array (image-to-image
        style, with start_step and second-based masking), and encodes/decodes
        through the VQ-VAE when one is registered.
        """
        lowerCAmelCase__ = steps or self.get_default_steps()
        self.scheduler.set_timesteps(__magic_name__ )
        lowerCAmelCase__ = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            lowerCAmelCase__ = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            # Fresh Gaussian noise in (batch, channels, height, width).
            lowerCAmelCase__ = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=__magic_name__ , device=self.device , )
        lowerCAmelCase__ = noise
        lowerCAmelCase__ = None
        if audio_file is not None or raw_audio is not None:
            # Convert the conditioning audio to a mel image, then to [-1, 1].
            self.mel.load_audio(__magic_name__ , __magic_name__ )
            lowerCAmelCase__ = self.mel.audio_slice_to_image(__magic_name__ )
            lowerCAmelCase__ = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
                (input_image.height, input_image.width) )
            lowerCAmelCase__ = (input_image / 255) * 2 - 1
            lowerCAmelCase__ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                # Encode the conditioning image into the VQ-VAE latent space.
                lowerCAmelCase__ = self.vqvae.encode(torch.unsqueeze(__magic_name__ , 0 ) ).latent_dist.sample(
                    generator=__magic_name__ )[0]
                lowerCAmelCase__ = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                # Noise the conditioning image up to the starting timestep.
                lowerCAmelCase__ = self.scheduler.add_noise(__magic_name__ , __magic_name__ , self.scheduler.timesteps[start_step - 1] )
            # Convert mask durations in seconds to pixel columns.
            lowerCAmelCase__ = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            lowerCAmelCase__ = int(mask_start_secs * pixels_per_second )
            lowerCAmelCase__ = int(mask_end_secs * pixels_per_second )
            lowerCAmelCase__ = self.scheduler.add_noise(__magic_name__ , __magic_name__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            # Conditional UNet takes an extra (encoding) argument.
            if isinstance(self.unet , __magic_name__ ):
                lowerCAmelCase__ = self.unet(__magic_name__ , __magic_name__ , __magic_name__ )["sample"]
            else:
                lowerCAmelCase__ = self.unet(__magic_name__ , __magic_name__ )["sample"]
            # DDIM takes eta; DDPM does not.
            if isinstance(self.scheduler , __magic_name__ ):
                lowerCAmelCase__ = self.scheduler.step(
                    model_output=__magic_name__ , timestep=__magic_name__ , sample=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , )["prev_sample"]
            else:
                lowerCAmelCase__ = self.scheduler.step(
                    model_output=__magic_name__ , timestep=__magic_name__ , sample=__magic_name__ , generator=__magic_name__ , )["prev_sample"]
            if mask is not None:
                # Re-impose the masked (kept) columns from the noised original.
                if mask_start > 0:
                    lowerCAmelCase__ = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    lowerCAmelCase__ = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            lowerCAmelCase__ = 1 / self.vqvae.config.scaling_factor * images
            lowerCAmelCase__ = self.vqvae.decode(__magic_name__ )["sample"]
        # Denormalize to uint8 images, then synthesize audio from each.
        lowerCAmelCase__ = (images / 2 + 0.5).clamp(0 , 1 )
        lowerCAmelCase__ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        lowerCAmelCase__ = (images * 255).round().astype("uint8" )
        lowerCAmelCase__ = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(__magic_name__ , mode="RGB" ).convert("L" ) for _ in images) )
        lowerCAmelCase__ = [self.mel.image_to_audio(__magic_name__ ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(__magic_name__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(__magic_name__ ) )

    @torch.no_grad()
    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Image.Image] , __magic_name__ : int = 50 ):
        """Reverse DDIM: map images back to the noise that generates them.

        Runs the DDIM update in reverse timestep order; requires a DDIM
        scheduler (asserted below).
        """
        assert isinstance(self.scheduler , __magic_name__ )
        self.scheduler.set_timesteps(__magic_name__ )
        lowerCAmelCase__ = np.array(
            [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
        lowerCAmelCase__ = (sample / 255) * 2 - 1
        lowerCAmelCase__ = torch.Tensor(__magic_name__ ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            lowerCAmelCase__ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            lowerCAmelCase__ = self.scheduler.alphas_cumprod[t]
            lowerCAmelCase__ = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            lowerCAmelCase__ = 1 - alpha_prod_t
            lowerCAmelCase__ = self.unet(__magic_name__ , __magic_name__ )["sample"]
            # Inverted DDIM update step.
            lowerCAmelCase__ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            lowerCAmelCase__ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            lowerCAmelCase__ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample

    @staticmethod
    def __SCREAMING_SNAKE_CASE ( __magic_name__ : torch.Tensor , __magic_name__ : torch.Tensor , __magic_name__ : float ):
        """Spherical linear interpolation (slerp) between two flattened tensors."""
        lowerCAmelCase__ = acos(torch.dot(torch.flatten(__magic_name__ ) , torch.flatten(__magic_name__ ) ) / torch.norm(__magic_name__ ) / torch.norm(__magic_name__ ) )
        return sin((1 - alpha) * theta ) * xa / sin(__magic_name__ ) + sin(alpha * theta ) * xa / sin(__magic_name__ )
48
"""Project Euler 77: smallest value expressible as a sum of primes in > 5000 ways.

Each prime partition of n is encoded as the product of its (prime) parts, so
the number of distinct products equals the number of distinct partitions.
"""
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Sieve of Eratosthenes over the odd numbers below NUM_PRIMES, plus 2.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the set of prime-product signatures of *number_to_partition*.

    Every way of writing the number as a sum of primes contributes the product
    of its parts; the cardinality of the result is the partition count.

    >>> sorted(partition(7))
    [7, 10, 12]
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}  # empty partition: the multiplicative identity

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the first integer with more than *number_unique_partitions*
    prime partitions, or ``None`` if none exists below ``NUM_PRIMES``.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
48
1
"""Pancake sort: sorting by repeated prefix reversals."""


def pancake_sort(arr: list) -> list:
    """Sort *arr* ascending with the pancake-sort algorithm and return it.

    Each pass flips the prefix that brings the maximum of the unsorted
    region to the front, then flips that whole region so the maximum lands
    in its final position.

    >>> pancake_sort([3, 1, 2])
    [1, 2, 3]
    >>> pancake_sort([])
    []
    """
    cur = len(arr)
    while cur > 1:
        # Index of the maximum within the still-unsorted prefix arr[:cur].
        mi = arr.index(max(arr[0:cur]))
        # Flip arr[:mi+1] so the maximum moves to position 0.
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Flip arr[:cur] so the maximum lands at position cur - 1.
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
48
"""Tokenization class for MGP-STR: a character-level tokenizer."""
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer: every character of the input maps to one vocab id.

    Args:
        vocab_file: Path to a JSON file mapping characters to ids.
        unk_token: Token for unknown characters (default ``"[GO]"``).
        bos_token: Beginning-of-sequence token (default ``"[GO]"``).
        eos_token: End-of-sequence token (default ``"[s]"``).
        pad_token: Padding token (default ``"[GO]"``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping (id -> character) used when decoding.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        # Base vocab merged with any tokens added after initialization.
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split *text* into a list of single characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        # Fall back to the unk token's id for unknown characters.
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary JSON into *save_directory* and return the file path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
48
1
"""RoCBert model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    """Configuration for a RoCBert model.

    Holds the standard BERT hyper-parameters plus RoCBert's extra embeddings
    for character pronunciation and shape; defaults match
    ``weiweishi/roc-bert-base-zh``.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation / glyph-shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
48
"""Project Euler 21: sum of all amicable numbers below a limit."""
from math import isqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of *n* (excluding n itself).

    Divisor pairs (i, n // i) are enumerated up to the exact integer square
    root; a perfect-square root is counted once.

    >>> sum_of_divisors(220)
    284
    """
    total = 0
    for i in range(1, isqrt(n) + 1):
        if n % i == 0:
            if i == n // i:
                total += i  # square root divisor: count only once
            else:
                total += i + n // i
    # The loop included 1 * n; drop n to keep only proper divisors.
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers strictly below *limit*.

    i is amicable when d(d(i)) == i and d(i) != i; both members of a pair
    below the limit are counted, matching the problem statement.
    """
    return sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
48
1
"""Project Euler 77: first value writable as a sum of primes in more than
5000 ways. A partition is encoded by the product of its prime parts, so
counting distinct products counts distinct partitions."""
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Primes below NUM_PRIMES via a small Eratosthenes sieve on the odds.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return prime-product signatures for all prime partitions of the input.

    >>> sorted(partition(7))
    [7, 10, 12]
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}  # the empty partition

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Smallest n < NUM_PRIMES with more than *number_unique_partitions*
    prime partitions, or ``None`` when no such n exists in range."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
48
'''simple docstring''' import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray: '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' lowerCAmelCase__ = np.nan for i in range(UpperCamelCase_ ): lowerCAmelCase__ = features[:, labels == i] lowerCAmelCase__ = data.mean(1 ) # Centralize the data of class i lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(UpperCamelCase_ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) return covariance_sum / features.shape[1] def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' lowerCAmelCase__ = features.mean(1 ) lowerCAmelCase__ = np.nan for i in range(UpperCamelCase_ ): lowerCAmelCase__ = features[:, labels == i] lowerCAmelCase__ = data.shape[1] lowerCAmelCase__ = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) lowerCAmelCase__ = device_data * np.dot( column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , ) return covariance_sum / features.shape[1] def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' if features.any(): lowerCAmelCase__ = features.mean(1 ) # Center the dataset lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) ) lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1] lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ ) # Take all the columns in the reverse order (-1), and then takes only the first lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ ) logging.info("Principal Component Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ ) logging.error("Dataset empty" ) raise AssertionError def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: lowerCAmelCase__ ,lowerCAmelCase__ = eigh( covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , ) lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions] lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ ) lowerCAmelCase__ = svd_matrix[:, 0:dimensions] lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ ) logging.info("Linear Discriminant Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , 
force=UpperCamelCase_ ) logging.error("Dataset empty" ) raise AssertionError def A ( ) -> None: '''simple docstring''' lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] ) lowerCAmelCase__ = 2 lowerCAmelCase__ = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(UpperCamelCase_ ) as error_info: lowerCAmelCase__ = linear_discriminant_analysis( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if isinstance(UpperCamelCase_ , np.ndarray ): raise AssertionError( "Did not raise AssertionError for dimensions > classes" ) assert error_info.type is AssertionError def A ( ) -> None: '''simple docstring''' lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowerCAmelCase__ = 2 lowerCAmelCase__ = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] ) with pytest.raises(UpperCamelCase_ ) as error_info: lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ ) if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
48
1
"""Gaussian (normal) probability density function."""
from numpy import exp, pi, sqrt


def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Return the value of the normal PDF with mean *mu* and standard
    deviation *sigma* evaluated at *x*.

    Note: return annotation fixed from ``int`` — the density is a float.

    >>> round(float(gaussian(0)), 5)
    0.39894
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
48
"""Greedy full justification of text into fixed-width lines (LeetCode 68).

BUGFIX: the obfuscated signature declared two parameters with the same
name, which is a SyntaxError; the canonical signature is restored.
"""


def text_justification(word: str, max_width: int) -> list:
    """Break the sentence *word* into fully justified lines of *max_width*.

    Words are packed greedily; surplus spaces on a line are distributed
    round-robin onto the leftmost gaps, and the final line is left-justified
    and padded with trailing spaces.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # Single word: pad the remainder of the line with spaces.
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # Base number of spaces for each of the gaps between words.
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # Distribute leftover spaces round-robin onto the left gaps.
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                aligned_words_list.append(line[i])
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            aligned_words_list.append(line[-1])
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        # width counts word characters; len(line) accounts for the single
        # mandatory space after each existing word.
        if width + len(inner_word) + len(line) <= max_width:
            line.append(inner_word)
            width += len(inner_word)
        else:
            # Line is full: justify it and start a new one with this word.
            answer.append(justify(line, width, max_width))
            line, width = [inner_word], len(inner_word)
    # Last line: left-justified, padded to max_width.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
48
1
"""Base16 (hexadecimal) encoding and decoding per RFC 3548 / RFC 4648."""


def base16_encode(data: bytes) -> str:
    """Encode *data* as an uppercase base16 string.

    >>> base16_encode(b"Hello")
    '48656C6C6F'
    >>> base16_encode(b"")
    ''
    """
    # Two uppercase hex digits per byte, zero-padded.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes.

    Raises:
        ValueError: if *data* has an odd length or contains characters
            outside the uppercase base16 alphabet ``0-9A-F``.

    >>> base16_decode('48656C6C6F')
    b'Hello'
    """
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
48
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 UpperCAmelCase__ : str = sys.version_info >= (3, 10) def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]: '''simple docstring''' return field(default_factory=lambda: default , metadata=UpperCamelCase_ ) @dataclass class A : snake_case__ :int snake_case__ :float snake_case__ :str snake_case__ :bool @dataclass class A : snake_case__ :int = 42 snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} ) @dataclass class A : snake_case__ :bool = False snake_case__ :bool = True snake_case__ :Optional[bool] = None class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Any = 'titi' snake_case__ :Optional[int] = 'toto' class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Union[str, Any] = 'titi' snake_case__ :str = 'toto' snake_case__ :int = 42 @dataclass class A : snake_case__ :BasicEnum = "toto" def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = BasicEnum(self.foo ) @dataclass class A : snake_case__ :MixedTypeEnum = "toto" def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = MixedTypeEnum(self.foo ) @dataclass class A : snake_case__ :Optional[int] = None snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} ) snake_case__ :Optional[str] = None snake_case__ :Optional[List[str]] = list_field(default=[] ) snake_case__ :Optional[List[int]] = list_field(default=[] ) 
@dataclass class A : snake_case__ :List[int] = list_field(default=[] ) snake_case__ :List[int] = list_field(default=[1, 2, 3] ) snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class A : snake_case__ :List[int] = field() snake_case__ :str = field() snake_case__ :BasicEnum = field() def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = BasicEnum(self.required_enum ) @dataclass class A : snake_case__ :int snake_case__ :"BasicEnum" = field() snake_case__ :"Optional[bool]" = None snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} ) snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class A : snake_case__ :bool = False snake_case__ :bool = True snake_case__ :bool | None = None @dataclass class A : snake_case__ :int | None = None snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} ) snake_case__ :str | None = None snake_case__ :list[str] | None = list_field(default=[] ) snake_case__ :list[int] | None = list_field(default=[] ) class A ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"} lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) ) 
del xx["type"], yy["type"] self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] ((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ ) self.assertFalse(example.flag ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , default=42 , type=__magic_name__ ) expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" ) expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" ) expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ ) lowerCAmelCase__ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__magic_name__ ) for dataclass_type in dataclass_types: lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo 
, MixedTypeEnum.toto ) lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowerCAmelCase__ = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" @dataclass class A : snake_case__ :Literal["titi", "toto", 42] = "toto" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) lowerCAmelCase__ = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ ) expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ ) expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual( __magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , ) lowerCAmelCase__ = parser.parse_args("--foo_int 1 
--bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() ) self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ ) expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" ) expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ ) expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ ) expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ ) lowerCAmelCase__ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__magic_name__ ) for dataclass_type in dataclass_types: lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) ) lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() ) self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" 
lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , ) expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ ) expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" ) os.mkdir(__magic_name__ ) with open(temp_local_path + ".json" , "w+" ) as f: json.dump(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self 
: str ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" ) os.mkdir(__magic_name__ ) with open(temp_local_path + ".yaml" , "w+" ) as f: yaml.dump(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.assertIsNotNone(__magic_name__ )
48
1
"""Tests for the Keras-NLP-backed in-graph GPT-2 tokenizer (`TFGPTaTokenizer`).

Review fixes: the two classes were both named ``A`` (the second shadowed the
first), both checkpoint constants were bound to the same name, and the code
referenced ``ModelToSave``, ``TOKENIZER_CHECKPOINTS``, ``TINY_MODEL_CHECKPOINT``
and the nonexistent ``tf.intaa`` — all undefined. Names restored to match usage.
"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow

if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpta import TFGPTaTokenizer

# Checkpoints used for both the reference (Python) and in-graph tokenizers.
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        """Minimal tf.Module bundling tokenizer + LM head so the pair can be exported as one SavedModel."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            """Tokenize raw strings in-graph and return the LM logits."""
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            # Ragged rows are padded with 0 by to_tensor(), so >0 recovers the attention mask.
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class A(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        """The in-graph tokenizer must match the reference Python tokenizer exactly."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    # NOTE(review): original said the nonexistent `tf.intaa`; int64 matches upstream — confirm.
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        """tf.function-compiled tokenization must agree with eager execution."""
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        """Round-trip through tf.saved_model and compare the serving signature output."""
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        """A tokenizer rebuilt via get_config/from_config must behave identically."""
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        """max_length padding/truncation must yield exactly max_length columns."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
48
"""Prim's minimum-spanning-tree algorithm on top of a hand-rolled binary min-heap.

Review fixes: the class and the function were both named ``A``, yet the code
referenced ``Heap()`` and ``prisms_algorithm`` (both undefined), and several
method signatures repeated the same parameter name (a SyntaxError). Canonical
names restored; behavior follows the visible statement structure.
"""
import sys
from collections import defaultdict


class Heap:
    """Array-backed min-heap that also tracks each vertex's slot in the array."""

    def __init__(self):
        # node_position[vertex] -> current index of that vertex inside the heap array
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap-array index currently holding `vertex`."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that `vertex` now lives at heap-array index `pos`."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at index `start` down until the min-heap property holds.

        `heap` holds the keys (distances), `positions` the vertex ids in the
        same order; both are permuted together and node_position is kept in sync.
        """
        if start > size // 2 - 1:
            return  # leaf: nothing below to compare with
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1  # only a left child exists
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            heap[start], heap[smallest_child] = heap[smallest_child], heap[start]
            positions[start], positions[smallest_child] = positions[smallest_child], positions[start]
            # Swap the two vertices' recorded slots to mirror the array swap.
            temp = self.get_position(positions[smallest_child])
            self.set_position(positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift a decreased key `val` at `index` up to its proper place."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                # Pull the parent down one level.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: the new key belongs at the top.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Establish the min-heap property over the whole array in O(n)."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest key; its slot is refilled with +inf."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Return the MST edges of a connected graph as (parent, vertex) pairs.

    `adjacency_list` maps each vertex 0..n-1 to a list of [neighbor, weight]
    pairs; the tree is grown from vertex 0.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
48
1
"""Conway's Game of Life rendered with matplotlib.

Review fixes: all four functions were named ``A`` (each shadowing the last),
while the main guard called ``create_canvas``/``seed``/``run`` — all undefined.
Canonical names restored. The matplotlib import is moved into the main guard
because it is only needed for the interactive display, so the module can be
imported without matplotlib installed.
"""
import random
import sys

import numpy as np

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """Return a size x size grid with every cell dead (False)."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomize the canvas in place: each cell becomes alive with probability 1/2."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the canvas one generation and return the new grid as nested lists.

    NOTE(review): for r == 0 (or c == 0) the slice [r-1:r+2] becomes [-1:2],
    which on grids larger than 1x1 is empty, so border cells see no neighbours.
    Preserved as-is — confirm this edge behavior is intended.
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply the Game of Life rules to one cell given its 3x3 neighbourhood."""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    # matplotlib is only required for the interactive display below.
    from matplotlib import pyplot as plt
    from matplotlib.colors import ListedColormap

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
48
"""Tests for the Speech2Text tokenizer.

Review fixes: every test method was named ``__SCREAMING_SNAKE_CASE`` (so each
shadowed the previous and none was test-discoverable), both classes were named
``A``, the three module constants all bound to ``UpperCAmelCase__`` while the
code referenced the undefined ``SAMPLE_VOCAB``/``FR_CODE``/``ES_CODE``, and the
mixin base ``TokenizerTesterMixin`` appeared as the undefined
``SCREAMING_SNAKE_CASE__``. Canonical names restored. The integration-test
padding runs are written as ``[x] * n`` expressions equal to the original
literals (all rows are length 178).
"""
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp

# Language-code token ids used by the multilingual checkpoint below.
FR_CODE = 5
ES_CODE = 10


@require_sentencepiece
@require_tokenizers
class SpeechaTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Build a vocab from the fixture sentencepiece model and save a tokenizer to tmpdirname."""
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        # All three rows are length 178; padding runs are written as `[x] * n`
        # expressions equal to the original repeated literals (pad_token_id is 1).
        expected_encoding = {
            "input_ids": [
                [3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2],
                [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2] + [1] * 105,
                [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2] + [1] * 154,
            ],
            "attention_mask": [
                [1] * 178,
                [1] * 73 + [0] * 105,
                [1] * 24 + [0] * 154,
            ],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )


@require_sentencepiece
class SpeechaTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        """Decoding must drop the leading language-code special token."""
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
48
1
"""DeBERTa-v2 model configuration.

Review fixes: ``__init__`` repeated the parameter name ``__magic_name__``
(duplicate arguments are a SyntaxError), every ``self.x = x`` assignment had
collapsed to a dead local (the config stored nothing), both classes were named
``A`` (the second shadowed the first), and the ONNX property names required by
the OnnxConfig framework (``inputs``, ``default_onnx_opset``) were lost.
Restored to canonical names and attribute assignments.
"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaVaConfig(PretrainedConfig):
    """Configuration for a DeBERTa-v2 model; defaults mirror microsoft/deberta-v2-xlarge."""

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1E-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: older checkpoints store pos_att_type as a
        # "|"-separated string instead of a list.
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # pooler_hidden_size defaults to hidden_size unless explicitly overridden.
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaVaOnnxConfig(OnnxConfig):
    """ONNX export configuration for DeBERTa-v2."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic axes of the exported model's inputs."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # token_type_ids only exists when the model actually uses segment embeddings.
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ):
        """Build dummy inputs for export, dropping token_type_ids when unused."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
48
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCAmelCase__ : Tuple = logging.get_logger(__name__) # General docstring UpperCAmelCase__ : int = "RegNetConfig" # Base docstring UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040" UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7] # Image classification docstring UpperCAmelCase__ : Tuple = "facebook/regnet-y-040" UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat" UpperCAmelCase__ : int = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class A ( tf.keras.layers.Layer ): def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ): """simple docstring""" super().__init__(**__magic_name__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) lowerCAmelCase__ = tf.keras.layers.ConvaD( filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , ) lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) lowerCAmelCase__ = ACTaFN[activation] if 
activation is not None else tf.identity def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) ) lowerCAmelCase__ = self.normalization(__magic_name__ ) lowerCAmelCase__ = self.activation(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = config.num_channels lowerCAmelCase__ = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ): """simple docstring""" lowerCAmelCase__ = shape_list(__magic_name__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) ) lowerCAmelCase__ = self.embedder(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = tf.keras.layers.ConvaD( filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" ) lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ): """simple docstring""" return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ ) class A ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" ) lowerCAmelCase__ = [ tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.pooler(__magic_name__ ) for layer_module in self.attention: lowerCAmelCase__ = layer_module(__magic_name__ ) lowerCAmelCase__ = hidden_state * pooled return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ): """simple docstring""" super().__init__(**__magic_name__ ) 
lowerCAmelCase__ = in_channels != out_channels or stride != 1 lowerCAmelCase__ = max(1 , out_channels // config.groups_width ) lowerCAmelCase__ = ( TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. lowerCAmelCase__ = [ TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( __magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ), ] lowerCAmelCase__ = ACTaFN[config.hidden_act] def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ): """simple docstring""" lowerCAmelCase__ = hidden_state for layer_module in self.layers: lowerCAmelCase__ = layer_module(__magic_name__ ) lowerCAmelCase__ = self.shortcut(__magic_name__ ) hidden_state += residual lowerCAmelCase__ = self.activation(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = in_channels != out_channels or stride != 1 lowerCAmelCase__ = max(1 , out_channels // config.groups_width ) lowerCAmelCase__ = ( TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) lowerCAmelCase__ = [ TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( __magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(__magic_name__ , 
reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ), ] lowerCAmelCase__ = ACTaFN[config.hidden_act] def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ): """simple docstring""" lowerCAmelCase__ = hidden_state for layer_module in self.layers: lowerCAmelCase__ = layer_module(__magic_name__ ) lowerCAmelCase__ = self.shortcut(__magic_name__ ) hidden_state += residual lowerCAmelCase__ = self.activation(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer lowerCAmelCase__ = [ # downsampling is done in the first layer with stride of 2 layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ), *[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ): """simple docstring""" for layer_module in self.layers: lowerCAmelCase__ = layer_module(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) lowerCAmelCase__ = 
zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) ) def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ): """simple docstring""" lowerCAmelCase__ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowerCAmelCase__ = hidden_states + (hidden_state,) lowerCAmelCase__ = stage_module(__magic_name__ ) if output_hidden_states: lowerCAmelCase__ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ ) @keras_serializable class A ( tf.keras.layers.Layer ): snake_case__ :List[Any] = RegNetConfig def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = config lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" ) lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" ) lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" ) @unpack_inputs def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ): """simple docstring""" lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ ) lowerCAmelCase__ = 
self.encoder( __magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ ) lowerCAmelCase__ = encoder_outputs[0] lowerCAmelCase__ = self.pooler(__magic_name__ ) # Change to NCHW output format have uniformity in the modules lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :str = RegNetConfig snake_case__ :Optional[Any] = 'regnet' snake_case__ :Tuple = 'pixel_values' @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. 
Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ): """simple docstring""" super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ ) lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(__magic_name__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ): """simple docstring""" lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.regnet( pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head 
on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ): """simple docstring""" super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ ) lowerCAmelCase__ = config.num_labels lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" ) # classification head lowerCAmelCase__ = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__magic_name__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ): """simple docstring""" lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.regnet( __magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ ) lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1] lowerCAmelCase__ = self.classifier[0](__magic_name__ ) lowerCAmelCase__ = self.classifier[1](__magic_name__ ) lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ ) if not return_dict: lowerCAmelCase__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ 
, hidden_states=outputs.hidden_states )
48
1
'''simple docstring''' import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class A ( unittest.TestCase ): snake_case__ :List[Any] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING snake_case__ :Dict = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ): """simple docstring""" lowerCAmelCase__ = AudioClassificationPipeline(model=__magic_name__ , feature_extractor=__magic_name__ ) # test with a raw waveform lowerCAmelCase__ = np.zeros((34000,) ) lowerCAmelCase__ = np.zeros((14000,) ) return audio_classifier, [audioa, audio] def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ): """simple docstring""" lowerCAmelCase__ ,lowerCAmelCase__ = examples lowerCAmelCase__ = audio_classifier(__magic_name__ ) # by default a model is initialized with num_labels=2 self.assertEqual( __magic_name__ , [ {"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )}, {"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )}, ] , ) lowerCAmelCase__ = audio_classifier(__magic_name__ , top_k=1 ) self.assertEqual( __magic_name__ , [ {"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )}, ] , ) self.run_torchaudio(__magic_name__ ) @require_torchaudio def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[Any] ): """simple docstring""" import datasets # test with a local file lowerCAmelCase__ = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) lowerCAmelCase__ = dataset[0]["audio"]["array"] 
lowerCAmelCase__ = audio_classifier(__magic_name__ ) self.assertEqual( __magic_name__ , [ {"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )}, {"score": ANY(__magic_name__ ), "label": ANY(__magic_name__ )}, ] , ) @require_torch def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = "anton-l/wav2vec2-random-tiny-classifier" lowerCAmelCase__ = pipeline("audio-classification" , model=__magic_name__ ) lowerCAmelCase__ = np.ones((8000,) ) lowerCAmelCase__ = audio_classifier(__magic_name__ , top_k=4 ) lowerCAmelCase__ = [ {"score": 0.0842, "label": "no"}, {"score": 0.0838, "label": "up"}, {"score": 0.0837, "label": "go"}, {"score": 0.0834, "label": "right"}, ] lowerCAmelCase__ = [ {"score": 0.0845, "label": "stop"}, {"score": 0.0844, "label": "on"}, {"score": 0.0841, "label": "right"}, {"score": 0.0834, "label": "left"}, ] self.assertIn(nested_simplify(__magic_name__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) lowerCAmelCase__ = {"array": np.ones((8000,) ), "sampling_rate": audio_classifier.feature_extractor.sampling_rate} lowerCAmelCase__ = audio_classifier(__magic_name__ , top_k=4 ) self.assertIn(nested_simplify(__magic_name__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" import datasets lowerCAmelCase__ = "superb/wav2vec2-base-superb-ks" lowerCAmelCase__ = pipeline("audio-classification" , model=__magic_name__ ) lowerCAmelCase__ = datasets.load_dataset("anton-l/superb_dummy" , "ks" , split="test" ) lowerCAmelCase__ = np.array(dataset[3]["speech"] , dtype=np.floataa ) lowerCAmelCase__ = audio_classifier(__magic_name__ , top_k=4 ) self.assertEqual( nested_simplify(__magic_name__ , decimals=3 ) , [ {"score": 0.981, "label": "go"}, {"score": 0.007, "label": "up"}, {"score": 0.006, "label": "_unknown_"}, {"score": 0.001, "label": "down"}, ] , ) @require_tf @unittest.skip("Audio classification is not implemented 
for TF" ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass
48
'''simple docstring'''
from datetime import datetime

import matplotlib.pyplot as plt
import torch


# NOTE(review): the four helpers in this file are all named `A`, so each
# definition shadows the previous one at import time — only the last one
# (the timestamp helper) remains reachable as a module attribute. Renaming
# would change the public interface, so the names are left as-is.


def A(UpperCamelCase_) -> None:
    """Freeze all parameters of *UpperCamelCase_* (disable gradient updates).

    FIX: the original iterated an undefined name ``module`` and assigned
    ``False`` to a throwaway local instead of ``param.requires_grad``.
    """
    for param in UpperCamelCase_.parameters():
        param.requires_grad = False


def A() -> str:
    """Return the best available torch device string: "cuda", "mps" or "cpu".

    FIX: the original tested an undefined name ``device``; the selection is
    now bound to a real local before being inspected and returned.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def A(UpperCamelCase_) -> None:
    """Display *UpperCamelCase_* (an image array) with matplotlib, axes hidden.

    FIX: the original referenced an undefined ``fig`` and passed the image
    itself (truthy) to ``set_visible``, which would leave the axes visible;
    the intent is clearly to hide them.
    """
    fig = plt.imshow(UpperCamelCase_)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def A() -> str:
    """Return the current wall-clock time formatted as ``HH:MM:SS``.

    FIX: the original called ``strftime`` on an undefined ``current_time``.
    """
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
48
1
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCAmelCase__ = abspath(join(dirname(dirname(dirname(__file__))), "src"))
# FIX: `git_repo_path` was used below but never defined (the assignment above
# had been renamed) — bind it so the sys.path insertion actually works.
git_repo_path = UpperCAmelCase__
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


# NOTE(review): both pytest hooks in this file are named `A`; the second
# definition shadows the first, so pytest cannot discover either by hook
# name. Left as-is to preserve the (mangled) public interface.


def A(UpperCamelCase_) -> None:
    """pytest_addoption-style hook: register shared transformers CLI options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(UpperCamelCase_)


def A(UpperCamelCase_) -> None:
    """pytest_terminal_summary-style hook: write report files when requested.

    FIX: the original read the option into a throwaway local, then tested an
    undefined ``make_reports`` and called ``getoption`` on an undefined
    ``terminalreporter``; the reporter is the hook's parameter, and the option
    value is passed on as the report id.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = UpperCamelCase_.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(UpperCamelCase_, id=make_reports)
48
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Lazy-import structure: maps submodule name -> list of public symbols.
# FIX: `_import_structure` is consumed by the `_LazyModule(...)` call below,
# but the original bound the dict — and each optional backend's additions —
# to throwaway names, leaving `_import_structure` undefined at module load.
# (The bogus `List[Any]`/`Dict` annotations are dropped: module-level
# annotations are evaluated, and those names were never imported.)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
UpperCAmelCase__ = _import_structure  # keep the original module attribute

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # FIX: a _LazyModule only takes effect when it replaces this module in
    # sys.modules; the original merely bound it to a module-level name.
    UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = UpperCAmelCase__
48
1
'''simple docstring'''


def A(UpperCamelCase_: str) -> bool:
    """Return True if every bracket in *UpperCamelCase_* is properly matched.

    Supports the three pairs ``()``, ``[]`` and ``{}``; any other character
    is ignored.

    FIX: the original measured ``len()`` of the *input string* instead of the
    stack — so a lone closer raised IndexError via ``stack.pop()`` and the
    final result was ``len(input) == 0`` rather than "stack empty".
    """
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for ch in UpperCamelCase_:
        if ch in open_brackets:
            stack.append(ch)
        elif ch in closed_brackets:
            # a closer with no pending opener, or the wrong opener, fails
            if not stack or open_to_closed[stack.pop()] != ch:
                return False

    # balanced iff no unmatched opener remains
    return not stack


# FIX: the interactive entry point calls `is_balanced`, which was undefined
# because the checker above had been renamed; bind the expected name.
is_balanced = A


def main() -> None:
    """Read a bracket sequence from stdin and report whether it is balanced.

    FIX: this function was also named ``A`` (shadowing the checker) while the
    guard below calls ``main()``; restored the name the call site uses.
    """
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
48
'''simple docstring'''
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Convert a TensorFlow BigBird checkpoint to a PyTorch model and save it.

    FIX: the original ``def A`` repeated the same parameter name four times
    (a SyntaxError) and its body referenced undefined names (``config``,
    ``model``, ``pytorch_dump_path``); restored distinct parameters matching
    the ``convert_tf_checkpoint_to_pytorch(...)`` call site below, which was
    itself calling an undefined name.
    """
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")

    # choose the head requested on the command line
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    # FIX: the parser and parsed args were bound to throwaway names while the
    # code below used `parser` / `args`; bound the names actually used.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
48
1
'''simple docstring''' import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging UpperCAmelCase__ : Dict = logging.get_logger(__name__) class A ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Optional[int] , __magic_name__ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ): """simple docstring""" super().__init__() lowerCAmelCase__ = nn.ModuleList(__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : torch.FloatTensor , __magic_name__ : Union[torch.Tensor, float, int] , __magic_name__ : torch.Tensor , __magic_name__ : List[torch.tensor] , __magic_name__ : List[float] , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[torch.Tensor] = None , __magic_name__ : Optional[Dict[str, Any]] = None , __magic_name__ : bool = False , __magic_name__ : bool = True , ): """simple docstring""" for i, (image, scale, controlnet) in enumerate(zip(__magic_name__ , __magic_name__ , self.nets ) ): lowerCAmelCase__ ,lowerCAmelCase__ = controlnet( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) # merge samples if i == 0: lowerCAmelCase__ ,lowerCAmelCase__ = down_samples, mid_sample else: lowerCAmelCase__ = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__magic_name__ , __magic_name__ ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, os.PathLike] , __magic_name__ : bool = True , __magic_name__ : Callable = None , __magic_name__ : bool = False , __magic_name__ : Optional[str] = None , ): """simple 
docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = save_directory for controlnet in self.nets: controlnet.save_pretrained( __magic_name__ , is_main_process=__magic_name__ , save_function=__magic_name__ , safe_serialization=__magic_name__ , variant=__magic_name__ , ) idx += 1 lowerCAmelCase__ = model_path_to_save + f"""_{idx}""" @classmethod def __SCREAMING_SNAKE_CASE ( cls : str , __magic_name__ : Optional[Union[str, os.PathLike]] , **__magic_name__ : Tuple ): """simple docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... lowerCAmelCase__ = pretrained_model_path while os.path.isdir(__magic_name__ ): lowerCAmelCase__ = ControlNetModel.from_pretrained(__magic_name__ , **__magic_name__ ) controlnets.append(__magic_name__ ) idx += 1 lowerCAmelCase__ = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__magic_name__ )} controlnets loaded from {pretrained_model_path}.""" ) if len(__magic_name__ ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__magic_name__ )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__magic_name__ )
48
'''simple docstring''' from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class A : def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ): """simple docstring""" lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = seq_length lowerCAmelCase__ = is_training lowerCAmelCase__ = use_input_mask lowerCAmelCase__ = use_token_type_ids lowerCAmelCase__ = use_labels lowerCAmelCase__ = vocab_size lowerCAmelCase__ = block_sizes lowerCAmelCase__ = num_decoder_layers lowerCAmelCase__ = d_model lowerCAmelCase__ = n_head lowerCAmelCase__ = d_head lowerCAmelCase__ = d_inner lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout lowerCAmelCase__ = 
attention_dropout lowerCAmelCase__ = activation_dropout lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = type_vocab_size lowerCAmelCase__ = 2 lowerCAmelCase__ = num_labels lowerCAmelCase__ = num_choices lowerCAmelCase__ = scope lowerCAmelCase__ = initializer_std # Used in the tests to check the size of the first attention layer lowerCAmelCase__ = n_head # Used in the tests to check the size of the first hidden state lowerCAmelCase__ = self.d_model # Used in the tests to check the number of output hidden states/attentions lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: lowerCAmelCase__ = self.num_hidden_layers + 2 def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ = None if self.use_input_mask: lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ = None if self.use_token_type_ids: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , 
activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ): """simple docstring""" lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = [input_ids, input_mask] lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ): """simple docstring""" lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = [input_ids, input_mask] 
lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ): 
"""simple docstring""" lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = self.num_choices lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ): """simple docstring""" lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , 
__magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) , ) = config_and_inputs lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :int = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) snake_case__ :Any = ( { 'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) snake_case__ :str = False snake_case__ :Any = False def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = TFFunnelModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" 
self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @require_tf class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :Any = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) snake_case__ :int = False snake_case__ :List[Any] = False def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ ) lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
48
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :str = StableUnCLIPImgaImgPipeline snake_case__ :Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS snake_case__ :Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ :List[str] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess snake_case__ :Optional[int] = frozenset([] ) def __SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" lowerCAmelCase__ = 32 lowerCAmelCase__ = embedder_hidden_size # image encoding components lowerCAmelCase__ = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) lowerCAmelCase__ = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=__magic_name__ , projection_dim=__magic_name__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular 
denoising components torch.manual_seed(0 ) lowerCAmelCase__ = StableUnCLIPImageNormalizer(embedding_dim=__magic_name__ ) lowerCAmelCase__ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) lowerCAmelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) lowerCAmelCase__ = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__magic_name__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) lowerCAmelCase__ = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__magic_name__ , layers_per_block=1 , upcast_attention=__magic_name__ , use_linear_projection=__magic_name__ , ) torch.manual_seed(0 ) lowerCAmelCase__ = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__magic_name__ , steps_offset=1 , ) torch.manual_seed(0 ) lowerCAmelCase__ = AutoencoderKL() lowerCAmelCase__ = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return components def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int]=0 , __magic_name__ : int=True ): """simple docstring""" if str(__magic_name__ 
).startswith("mps" ): lowerCAmelCase__ = torch.manual_seed(__magic_name__ ) else: lowerCAmelCase__ = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) lowerCAmelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ ) if pil_image: lowerCAmelCase__ = input_image * 0.5 + 0.5 lowerCAmelCase__ = input_image.clamp(0 , 1 ) lowerCAmelCase__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowerCAmelCase__ = DiffusionPipeline.numpy_to_pil(__magic_name__ )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = StableUnCLIPImgaImgPipeline(**__magic_name__ ) lowerCAmelCase__ = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) lowerCAmelCase__ = self.get_dummy_inputs(__magic_name__ ) inputs.update({"image_embeds": None} ) lowerCAmelCase__ = sd_pipe(**__magic_name__ ).images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" lowerCAmelCase__ = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=__magic_name__ ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA 
and `xformers` installed" , ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__magic_name__ ) @slow @require_torch_gpu class A ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) lowerCAmelCase__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) lowerCAmelCase__ = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCAmelCase__ = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCAmelCase__ = pipe(__magic_name__ , "anime turle" , generator=__magic_name__ , output_type="np" ) lowerCAmelCase__ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" lowerCAmelCase__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) lowerCAmelCase__ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) lowerCAmelCase__ = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) 
pipe.set_progress_bar_config(disable=__magic_name__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCAmelCase__ = torch.Generator(device="cpu" ).manual_seed(0 ) lowerCAmelCase__ = pipe(__magic_name__ , "anime turle" , generator=__magic_name__ , output_type="np" ) lowerCAmelCase__ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase__ = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) lowerCAmelCase__ = pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() lowerCAmelCase__ = pipe( __magic_name__ , "anime turtle" , num_inference_steps=2 , output_type="np" , ) lowerCAmelCase__ = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
48
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging UpperCAmelCase__ : Tuple = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = { "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Union[str, Any] = 'umt5' snake_case__ :Any = ['past_key_values'] def __init__( self : List[Any] , __magic_name__ : Tuple=250112 , __magic_name__ : str=512 , __magic_name__ : int=64 , __magic_name__ : str=1024 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=6 , __magic_name__ : Dict=32 , __magic_name__ : Optional[Any]=128 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=1E-6 , __magic_name__ : Optional[int]=1.0 , __magic_name__ : Dict="gated-gelu" , __magic_name__ : List[str]=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[int]="T5Tokenizer" , __magic_name__ : str=True , __magic_name__ : int=0 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : str=0 , **__magic_name__ : Any , ): """simple docstring""" super().__init__( is_encoder_decoder=__magic_name__ , tokenizer_class=__magic_name__ , tie_word_embeddings=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , ) lowerCAmelCase__ = vocab_size lowerCAmelCase__ = d_model lowerCAmelCase__ = d_kv lowerCAmelCase__ = d_ff lowerCAmelCase__ = num_layers lowerCAmelCase__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase__ = num_heads lowerCAmelCase__ = relative_attention_num_buckets lowerCAmelCase__ = relative_attention_max_distance lowerCAmelCase__ = dropout_rate lowerCAmelCase__ = layer_norm_epsilon lowerCAmelCase__ = 
initializer_factor lowerCAmelCase__ = feed_forward_proj lowerCAmelCase__ = use_cache lowerCAmelCase__ = self.feed_forward_proj.split("-" ) lowerCAmelCase__ = act_info[-1] lowerCAmelCase__ = act_info[0] == "gated" if len(__magic_name__ ) > 1 and act_info[0] != "gated" or len(__magic_name__ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) if feed_forward_proj == "gated-gelu": lowerCAmelCase__ = "gelu_new" @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return self.d_model @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" return self.num_heads @property def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return self.num_layers class A ( SCREAMING_SNAKE_CASE__ ): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: lowerCAmelCase__ = "past_encoder_sequence + sequence" lowerCAmelCase__ = {0: "batch"} lowerCAmelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"} lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="inputs" ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" return 13 @property def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" return 5E-4
48
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ : List[str] = { "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[Any] = [ "LILT_PRETRAINED_MODEL_ARCHIVE_LIST", "LiltForQuestionAnswering", "LiltForSequenceClassification", "LiltForTokenClassification", "LiltModel", "LiltPreTrainedModel", ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
48
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class A : def __init__( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = {} def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = {} def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ): """simple docstring""" if nodea not in self.connections: self.add_node(__magic_name__ ) if nodea not in self.connections: self.add_node(__magic_name__ ) lowerCAmelCase__ = probability def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" return list(self.connections ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]: '''simple docstring''' lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = Counter(graph.get_nodes() ) lowerCAmelCase__ = start for _ in range(UpperCamelCase_ ): lowerCAmelCase__ = graph.transition(UpperCamelCase_ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
48
1
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def A ( UpperCamelCase_ : Optional[int] ) -> Any: '''simple docstring''' if ( (cp >= 0X4_e00 and cp <= 0X9_fff) or (cp >= 0X3_400 and cp <= 0X4_dbf) # or (cp >= 0X20_000 and cp <= 0X2a_6df) # or (cp >= 0X2a_700 and cp <= 0X2b_73f) # or (cp >= 0X2b_740 and cp <= 0X2b_81f) # or (cp >= 0X2b_820 and cp <= 0X2c_eaf) # or (cp >= 0Xf_900 and cp <= 0Xf_aff) or (cp >= 0X2f_800 and cp <= 0X2f_a1f) # ): # return True return False def A ( UpperCamelCase_ : str ) -> Any: '''simple docstring''' for char in word: lowerCAmelCase__ = ord(UpperCamelCase_ ) if not _is_chinese_char(UpperCamelCase_ ): return 0 return 1 def A ( UpperCamelCase_ : List[str] ) -> List[Any]: '''simple docstring''' lowerCAmelCase__ = set() for token in tokens: lowerCAmelCase__ = len(UpperCamelCase_ ) > 1 and is_chinese(UpperCamelCase_ ) if chinese_word: word_set.add(UpperCamelCase_ ) lowerCAmelCase__ = list(UpperCamelCase_ ) return word_list def A ( UpperCamelCase_ : List[str] , UpperCamelCase_ : set() ) -> str: '''simple docstring''' if not chinese_word_set: return bert_tokens lowerCAmelCase__ = max([len(UpperCamelCase_ ) for w in chinese_word_set] ) lowerCAmelCase__ = bert_tokens lowerCAmelCase__ ,lowerCAmelCase__ = 0, len(UpperCamelCase_ ) while start < end: lowerCAmelCase__ = True if is_chinese(bert_word[start] ): lowerCAmelCase__ = min(end - start , UpperCamelCase_ ) for i in range(UpperCamelCase_ , 1 , -1 ): lowerCAmelCase__ = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase__ = "##" + bert_word[j] lowerCAmelCase__ = start + i lowerCAmelCase__ = False break if single_word: start += 1 return bert_word def A ( UpperCamelCase_ : List[str] , UpperCamelCase_ : LTP , UpperCamelCase_ : BertTokenizer ) -> str: '''simple docstring''' lowerCAmelCase__ = [] for i in range(0 
, len(UpperCamelCase_ ) , 1_00 ): lowerCAmelCase__ = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=["cws"] ).cws lowerCAmelCase__ = [get_chinese_word(UpperCamelCase_ ) for r in res] ltp_res.extend(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) lowerCAmelCase__ = [] for i in range(0 , len(UpperCamelCase_ ) , 1_00 ): lowerCAmelCase__ = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=5_12 ) bert_res.extend(res["input_ids"] ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) lowerCAmelCase__ = [] for input_ids, chinese_word in zip(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase__ = [] for id in input_ids: lowerCAmelCase__ = bert_tokenizer._convert_id_to_token(UpperCamelCase_ ) input_tokens.append(UpperCamelCase_ ) lowerCAmelCase__ = add_sub_symbol(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(UpperCamelCase_ ): if token[:2] == "##": lowerCAmelCase__ = token[2:] # save chinese tokens' pos if len(UpperCamelCase_ ) == 1 and _is_chinese_char(ord(UpperCamelCase_ ) ): ref_id.append(UpperCamelCase_ ) ref_ids.append(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) return ref_ids def A ( UpperCamelCase_ : List[str] ) -> str: '''simple docstring''' with open(args.file_name , "r" , encoding="utf-8" ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = [line.strip() for line in data if len(UpperCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase__ = LTP(args.ltp ) # faster in GPU device lowerCAmelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase__ = prepare_ref(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with open(args.save_path , "w" , encoding="utf-8" ) as f: lowerCAmelCase__ = [json.dumps(UpperCamelCase_ ) + "\n" for ref in ref_ids] f.writelines(UpperCamelCase_ ) if __name__ == "__main__": UpperCAmelCase__ : Any = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) UpperCAmelCase__ : Union[str, Any] = parser.parse_args() main(args)
48
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration UpperCAmelCase__ : Optional[Any] = pytest.mark.integration UpperCAmelCase__ : str = {"comet"} UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None UpperCAmelCase__ : Optional[int] = {"code_eval"} UpperCAmelCase__ : List[Any] = os.name == "nt" UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"} UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' @wraps(UpperCamelCase_ ) def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("\"test requires Fairseq\"" ) else: test_case(self , UpperCamelCase_ ) return wrapper def A ( UpperCamelCase_ : List[Any] ) -> str: '''simple docstring''' @wraps(UpperCamelCase_ ) def wrapper(self : Optional[int] , UpperCamelCase_ : int ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("\"test requires transformers\"" ) else: test_case(self , UpperCamelCase_ ) return wrapper def A ( UpperCamelCase_ : Any ) -> int: '''simple docstring''' @wraps(UpperCamelCase_ ) def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("\"test not supported on Windows\"" ) else: test_case(self , UpperCamelCase_ ) return wrapper def A ( ) -> Tuple: '''simple docstring''' lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )] return [{"testcase_name": x, "metric_name": x} 
for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @local class A ( parameterized.TestCase ): snake_case__ :Union[str, Any] = {} snake_case__ :Optional[Any] = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = "[...]" lowerCAmelCase__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path ) lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ ) # check parameters lowerCAmelCase__ = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ): with self.use_local_metrics(): try: lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ): """simple docstring""" lowerCAmelCase__ = "[...]" lowerCAmelCase__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path ) # run doctest with self.use_local_metrics(): lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def 
__SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ): """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ): yield else: yield @contextmanager def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ): return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ ) with patch("datasets.load_metric" ) as mock_load_metric: lowerCAmelCase__ = load_local_metric yield @classmethod def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ): """simple docstring""" def wrapper(__magic_name__ : Dict ): lowerCAmelCase__ = contextmanager(__magic_name__ ) lowerCAmelCase__ = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("bleurt" ) def A ( UpperCamelCase_ : str ) -> Any: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags class A ( SCREAMING_SNAKE_CASE__ ): def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ): """simple docstring""" assert len(input_dict["input_ids"] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor" ) as mock_create_predictor: lowerCAmelCase__ = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore" ) def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]: '''simple docstring''' import torch def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) ) # mock get_model which is supposed to do download 
a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model" ), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: lowerCAmelCase__ = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet" ) def A ( UpperCamelCase_ : Optional[int] ) -> Any: '''simple docstring''' def load_from_checkpoint(UpperCamelCase_ : Tuple ): class A : def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ): """simple docstring""" assert len(__magic_name__ ) == 2 lowerCAmelCase__ = [0.19, 0.92] return scores, sum(__magic_name__ ) / len(__magic_name__ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model" ) as mock_download_model: lowerCAmelCase__ = None with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint: lowerCAmelCase__ = load_from_checkpoint yield def A ( ) -> Tuple: '''simple docstring''' lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) ) lowerCAmelCase__ = "ERROR" lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ): metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
48
1
'''Tests for the diffusers AutoencoderKL (VAE) model.'''
# NOTE(review): this chunk was flattened onto a few physical lines and its
# identifiers were machine-mangled: every assignment target became
# `lowerCAmelCase__`, every method `__SCREAMING_SNAKE_CASE`, most parameters
# `__magic_name__`, while later statements still read the ORIGINAL names
# (`image`, `model`, `out`, `loss_a`, ...).  The reconstruction below restores
# layout and adds commentary only; the mangled tokens are preserved
# byte-for-byte, so this file will NOT run until the bindings are restored
# from the upstream diffusers test suite.
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


# Model-tester for AutoencoderKL; the two mangled bases are presumably
# ModelTesterMixin and UNetTesterMixin (imported above) -- TODO confirm.
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    snake_case__ :Optional[int] = AutoencoderKL  # model class under test
    snake_case__ :int = 'sample'                 # name of the main model output
    snake_case__ :str = 1e-2                     # base tolerance used by the mixin

    @property
    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Build a dummy 4x3x32x32 image batch on the test device."""
        lowerCAmelCase__ = 4
        lowerCAmelCase__ = 3
        lowerCAmelCase__ = (32, 32)
        lowerCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ )
        return {"sample": image}

    @property
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Expected input shape (C, H, W)."""
        return (3, 32, 32)

    @property
    def __SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Expected output shape (C, H, W)."""
        return (3, 32, 32)

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Return (init kwargs, forward inputs) for a tiny two-block AutoencoderKL."""
        lowerCAmelCase__ = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        lowerCAmelCase__ = self.dummy_input
        return init_dict, inputs_dict

    def __SCREAMING_SNAKE_CASE ( self : int ):
        """Mixin hook intentionally disabled for this model."""
        pass

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Mixin hook intentionally disabled for this model."""
        pass

    @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Gradients of a plain model and a gradient-checkpointed clone must match."""
        lowerCAmelCase__ ,lowerCAmelCase__ = self.prepare_init_args_and_inputs_for_common()
        lowerCAmelCase__ = self.model_class(**__magic_name__ )
        model.to(__magic_name__ )
        assert not model.is_gradient_checkpointing and model.training
        lowerCAmelCase__ = model(**__magic_name__ ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        lowerCAmelCase__ = torch.randn_like(__magic_name__ )
        lowerCAmelCase__ = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        lowerCAmelCase__ = self.model_class(**__magic_name__ )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(__magic_name__ )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        lowerCAmelCase__ = model_a(**__magic_name__ ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        lowerCAmelCase__ = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        lowerCAmelCase__ = dict(model.named_parameters() )
        lowerCAmelCase__ = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )

    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """from_pretrained on the dummy checkpoint loads with no missing keys."""
        lowerCAmelCase__ ,lowerCAmelCase__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__magic_name__ )
        self.assertIsNotNone(__magic_name__ )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(__magic_name__ )
        lowerCAmelCase__ = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def __SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Seeded forward pass reproduces stored per-device output slices."""
        lowerCAmelCase__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
        lowerCAmelCase__ = model.to(__magic_name__ )
        model.eval()
        # MPS does not support device-bound generators, so fall back to the global seed.
        if torch_device == "mps":
            lowerCAmelCase__ = torch.manual_seed(0 )
        else:
            lowerCAmelCase__ = torch.Generator(device=__magic_name__ ).manual_seed(0 )
        lowerCAmelCase__ = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        lowerCAmelCase__ = image.to(__magic_name__ )
        with torch.no_grad():
            lowerCAmelCase__ = model(__magic_name__ , sample_posterior=__magic_name__ , generator=__magic_name__ ).sample
        lowerCAmelCase__ = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            lowerCAmelCase__ = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ] )
        elif torch_device == "cpu":
            lowerCAmelCase__ = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            lowerCAmelCase__ = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
        self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1E-2 ) )


# Integration tests against the Stable Diffusion VAE; all are @slow and most
# additionally require a GPU.
@slow
class A ( unittest.TestCase ):
    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Any ):
        """Filename of the stored gaussian-noise fixture for (seed, shape)."""
        return f"""gaussian_noise_s={seed}_shape={"_".join([str(__magic_name__ ) for s in shape] )}.npy"""

    def __SCREAMING_SNAKE_CASE ( self : Tuple ):
        """Free Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str]=0 , __magic_name__ : str=(4, 3, 512, 512) , __magic_name__ : str=False ):
        """Load a seeded fixture image tensor onto the test device."""
        # NOTE(review): both dtype branches read `torch.floataa` after mangling;
        # upstream this is float16 vs float32 -- restore from the original file.
        lowerCAmelCase__ = torch.floataa if fpaa else torch.floataa
        lowerCAmelCase__ = torch.from_numpy(load_hf_numpy(self.get_file_format(__magic_name__ , __magic_name__ ) ) ).to(__magic_name__ ).to(__magic_name__ )
        return image

    def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : List[str]="CompVis/stable-diffusion-v1-4" , __magic_name__ : Optional[Any]=False ):
        """Load the Stable Diffusion VAE submodule in eval mode on the test device."""
        lowerCAmelCase__ = "fp16" if fpaa else None
        lowerCAmelCase__ = torch.floataa if fpaa else torch.floataa
        lowerCAmelCase__ = AutoencoderKL.from_pretrained(
            __magic_name__ , subfolder="vae" , torch_dtype=__magic_name__ , revision=__magic_name__ , )
        model.to(__magic_name__ ).eval()
        return model

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any]=0 ):
        """Device-appropriate seeded torch.Generator (global seed on MPS)."""
        if torch_device == "mps":
            return torch.manual_seed(__magic_name__ )
        return torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Dict ):
        """Stochastic (sample_posterior=True) forward pass matches stored slices."""
        lowerCAmelCase__ = self.get_sd_vae_model()
        lowerCAmelCase__ = self.get_sd_image(__magic_name__ )
        lowerCAmelCase__ = self.get_generator(__magic_name__ )
        with torch.no_grad():
            lowerCAmelCase__ = model(__magic_name__ , generator=__magic_name__ , sample_posterior=__magic_name__ ).sample
        assert sample.shape == image.shape
        lowerCAmelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        lowerCAmelCase__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=3E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Any ):
        """fp16 stochastic forward pass matches stored slices (GPU only)."""
        lowerCAmelCase__ = self.get_sd_vae_model(fpaa=__magic_name__ )
        lowerCAmelCase__ = self.get_sd_image(__magic_name__ , fpaa=__magic_name__ )
        lowerCAmelCase__ = self.get_generator(__magic_name__ )
        with torch.no_grad():
            lowerCAmelCase__ = model(__magic_name__ , generator=__magic_name__ , sample_posterior=__magic_name__ ).sample
        assert sample.shape == image.shape
        lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        lowerCAmelCase__ = torch.tensor(__magic_name__ )
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ] )
    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ):
        """Deterministic forward pass (no generator) matches stored slices."""
        lowerCAmelCase__ = self.get_sd_vae_model()
        lowerCAmelCase__ = self.get_sd_image(__magic_name__ )
        with torch.no_grad():
            lowerCAmelCase__ = model(__magic_name__ ).sample
        assert sample.shape == image.shape
        lowerCAmelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        lowerCAmelCase__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=3E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] ):
        """Latent decode (3x4x64x64 -> 3x3x512x512) matches stored slices (GPU only)."""
        lowerCAmelCase__ = self.get_sd_vae_model()
        lowerCAmelCase__ = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().cpu()
        lowerCAmelCase__ = torch.tensor(__magic_name__ )
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-3 )

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ] )
    @require_torch_gpu
    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple ):
        """fp16 latent decode matches stored slices (GPU only, looser tolerance)."""
        lowerCAmelCase__ = self.get_sd_vae_model(fpaa=__magic_name__ )
        lowerCAmelCase__ = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) , fpaa=__magic_name__ )
        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        lowerCAmelCase__ = torch.tensor(__magic_name__ )
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=5E-3 )

    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Union[str, Any] ):
        """fp16 decode with and without xformers attention must agree."""
        lowerCAmelCase__ = self.get_sd_vae_model(fpaa=__magic_name__ )
        lowerCAmelCase__ = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) , fpaa=__magic_name__ )
        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__ ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-1 )

    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[Any] ):
        """fp32 decode with and without xformers attention must agree."""
        lowerCAmelCase__ = self.get_sd_vae_model()
        lowerCAmelCase__ = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__ ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            lowerCAmelCase__ = model.decode(__magic_name__ ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-2 )

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ] )
    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : Tuple ):
        """Posterior encode + sample matches stored latent slices (1/8 spatial size)."""
        lowerCAmelCase__ = self.get_sd_vae_model()
        lowerCAmelCase__ = self.get_sd_image(__magic_name__ )
        lowerCAmelCase__ = self.get_generator(__magic_name__ )
        with torch.no_grad():
            lowerCAmelCase__ = model.encode(__magic_name__ ).latent_dist
            lowerCAmelCase__ = dist.sample(generator=__magic_name__ )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        lowerCAmelCase__ = sample[0, -1, -3:, -3:].flatten().cpu()
        lowerCAmelCase__ = torch.tensor(__magic_name__ )
        # MPS needs a looser tolerance for the sampled latents.
        lowerCAmelCase__ = 3E-3 if torch_device != "mps" else 1E-2
        assert torch_all_close(__magic_name__ , __magic_name__ , atol=__magic_name__ )
48
'''NLLB-200 translation tool for the transformers agents framework.'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): identifiers in this chunk were machine-mangled (`UpperCAmelCase__`
# for the module constant, `__SCREAMING_SNAKE_CASE` for methods, `lowerCAmelCase__`
# for locals) while later statements still read the original names
# (`LANGUAGE_CODES`, `src_lang`, ...).  Layout is reconstructed and comments added;
# tokens and all runtime strings are preserved byte-for-byte.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


# Plain-English language name -> FLORES-200 language code, as used by the
# NLLB-200 checkpoints.  Key strings are data read at runtime and must not be
# edited (note the deliberate trailing space in "Minangkabau Arabic ").
UpperCAmelCase__ : int = {
    "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab", "Ta'izzi-Adeni Arabic": "acq_Arab",
    "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn", "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn",
    "Amharic": "amh_Ethi", "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab", "Modern Standard Arabic Romanized": "arb_Latn",
    "Najdi Arabic": "ars_Arab", "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng",
    "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn", "South Azerbaijani": "azb_Arab",
    "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl", "Bambara": "bam_Latn", "Balinese": "ban_Latn",
    "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn", "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva",
    "Banjar Arabic": "bjn_Arab", "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab", "Crimean Tatar": "crh_Latn",
    "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn", "Southwestern Dinka": "dik_Latn",
    "Dyula": "dyu_Latn", "Dzongkha": "dzo_Tibt", "Greek": "ell_Grek", "English": "eng_Latn",
    "Esperanto": "epo_Latn", "Estonian": "est_Latn", "Basque": "eus_Latn", "Ewe": "ewe_Latn",
    "Faroese": "fao_Latn", "Fijian": "fij_Latn", "Finnish": "fin_Latn", "Fon": "fon_Latn",
    "French": "fra_Latn", "Friulian": "fur_Latn", "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn",
    "Irish": "gle_Latn", "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn", "Armenian": "hye_Armn",
    "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn", "Icelandic": "isl_Latn",
    "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan", "Kabyle": "kab_Latn",
    "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda", "Kashmiri Arabic": "kas_Arab",
    "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor", "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn",
    "Kazakh": "kaz_Cyrl", "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr",
    "Kikuyu": "kik_Latn", "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn", "Lithuanian": "lit_Latn",
    "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn", "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn",
    "Ganda": "lug_Latn", "Luo": "luo_Latn", "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn",
    "Magahi": "mag_Deva", "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl", "Plateau Malagasy": "plt_Latn",
    "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng", "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn",
    "Maori": "mri_Latn", "Burmese": "mya_Mymr", "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn",
    "Norwegian Bokmål": "nob_Latn", "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn",
    "Nyanja": "nya_Latn", "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn", "Western Persian": "pes_Arab",
    "Polish": "pol_Latn", "Portuguese": "por_Latn", "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab",
    "Ayacucho Quechua": "quy_Latn", "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl",
    "Sango": "sag_Latn", "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn",
    "Shan": "shn_Mymr", "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn",
    "Samoan": "smo_Latn", "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn",
    "Southern Sotho": "sot_Latn", "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn",
    "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng", "Tok Pisin": "tpi_Latn",
    "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn", "Tumbuka": "tum_Latn",
    "Turkish": "tur_Latn", "Twi": "twi_Latn", "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab",
    "Ukrainian": "ukr_Cyrl", "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn", "Yue Chinese": "yue_Hant",
    "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant", "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn",
}


# Translation tool wrapping the distilled NLLB-200 seq2seq checkpoint; the
# mangled base is presumably PipelineTool (imported above) -- TODO confirm.
class A ( SCREAMING_SNAKE_CASE__ ):
    # Checkpoint, tool description, tool name, pre/post-processor classes,
    # language table, and input/output signature consumed by the agents runtime.
    snake_case__ :Tuple = 'facebook/nllb-200-distilled-600M'
    # NOTE(review): "ouput" typo below is inside a runtime string and is
    # intentionally left untouched here.
    snake_case__ :Optional[Any] = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    snake_case__ :List[Any] = 'translator'
    snake_case__ :List[Any] = AutoTokenizer
    snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM
    snake_case__ :List[str] = LANGUAGE_CODES
    snake_case__ :List[Any] = ['text', 'text', 'text']  # inputs: text, src_lang, tgt_lang
    snake_case__ :List[Any] = ['text']                  # output: translated text

    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
        """Validate both plain-English language names and tokenize for translation."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"""{src_lang} is not a supported language.""" )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"""{tgt_lang} is not a supported language.""" )
        lowerCAmelCase__ = self.lang_to_code[src_lang]
        lowerCAmelCase__ = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            __magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ):
        """Run seq2seq generation on the tokenized inputs."""
        return self.model.generate(**__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ):
        """Decode the first generated sequence, dropping special tokens."""
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
48
1
'''Tests for the transformers Swinv2 vision model.'''
# NOTE(review): identifiers in this chunk were machine-mangled (`lowerCAmelCase__`
# assignment targets, `__SCREAMING_SNAKE_CASE` methods, `__magic_name__` params)
# while later statements still read the ORIGINAL names (`parent`, `config`,
# `model`, ...).  Layout is reconstructed and comments added; tokens are
# preserved byte-for-byte, so this file will NOT run until the bindings are
# restored from the upstream transformers test suite.
import collections
import inspect
import unittest

from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
    from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# Builds tiny Swinv2 configs/inputs and shared shape checks for the test class.
class A :
    def __init__( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any]=13 , __magic_name__ : Tuple=32 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=3 , __magic_name__ : Dict=16 , __magic_name__ : List[Any]=[1, 2, 1] , __magic_name__ : Union[str, Any]=[2, 2, 4] , __magic_name__ : str=2 , __magic_name__ : Union[str, Any]=2.0 , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=0.0 , __magic_name__ : List[Any]=0.0 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : int="gelu" , __magic_name__ : List[str]=False , __magic_name__ : Optional[Any]=True , __magic_name__ : Any=0.02 , __magic_name__ : List[str]=1E-5 , __magic_name__ : Dict=True , __magic_name__ : List[str]=None , __magic_name__ : Optional[int]=True , __magic_name__ : str=10 , __magic_name__ : List[str]=8 , ):
        """Store the tiny-model hyperparameters used by every test."""
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = batch_size
        lowerCAmelCase__ = image_size
        lowerCAmelCase__ = patch_size
        lowerCAmelCase__ = num_channels
        lowerCAmelCase__ = embed_dim
        lowerCAmelCase__ = depths
        lowerCAmelCase__ = num_heads
        lowerCAmelCase__ = window_size
        lowerCAmelCase__ = mlp_ratio
        lowerCAmelCase__ = qkv_bias
        lowerCAmelCase__ = hidden_dropout_prob
        lowerCAmelCase__ = attention_probs_dropout_prob
        lowerCAmelCase__ = drop_path_rate
        lowerCAmelCase__ = hidden_act
        lowerCAmelCase__ = use_absolute_embeddings
        lowerCAmelCase__ = patch_norm
        lowerCAmelCase__ = layer_norm_eps
        lowerCAmelCase__ = initializer_range
        lowerCAmelCase__ = is_training
        lowerCAmelCase__ = scope
        lowerCAmelCase__ = use_labels
        lowerCAmelCase__ = type_sequence_label_size
        lowerCAmelCase__ = encoder_stride

    def __SCREAMING_SNAKE_CASE ( self : Dict ):
        """Return (config, pixel_values, labels) for one tiny batch."""
        lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase__ = None
        if self.use_labels:
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCAmelCase__ = self.get_config()
        return config, pixel_values, labels

    def __SCREAMING_SNAKE_CASE ( self : str ):
        """Build a SwinvaConfig from the stored hyperparameters."""
        # NOTE(review): `path_norm=` below looks like an upstream typo for
        # `patch_norm=` -- preserved as-is.
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Optional[int] ):
        """Base model forward: check last_hidden_state shape after all stages."""
        lowerCAmelCase__ = SwinvaModel(config=__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        lowerCAmelCase__ = model(__magic_name__ )
        # Sequence shrinks 4x per stage; channels double per stage.
        lowerCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : str ):
        """Masked-image-modeling head: RGB and greyscale reconstruction shapes."""
        lowerCAmelCase__ = SwinvaForMaskedImageModeling(config=__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        lowerCAmelCase__ = model(__magic_name__ )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = SwinvaForMaskedImageModeling(__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCAmelCase__ = model(__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : str , __magic_name__ : Dict ):
        """Classification head: logits shape matches the label space."""
        lowerCAmelCase__ = self.type_sequence_label_size
        lowerCAmelCase__ = SwinvaForImageClassification(__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        lowerCAmelCase__ = model(__magic_name__ , labels=__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Return (config, inputs_dict) in the shape the common mixin expects."""
        lowerCAmelCase__ = self.prepare_config_and_inputs()
        lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = config_and_inputs
        lowerCAmelCase__ = {"pixel_values": pixel_values}
        return config, inputs_dict


# Main Swinv2 test class; mangled bases are presumably ModelTesterMixin and
# PipelineTesterMixin (imported above) -- TODO confirm.
@require_torch
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    snake_case__ :Optional[int] = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    snake_case__ :Any = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    # Mixin feature flags (e.g. pruning/resize tests) all disabled for Swinv2.
    snake_case__ :Optional[int] = False
    snake_case__ :Union[str, Any] = False
    snake_case__ :Any = False
    snake_case__ :List[str] = False

    def __SCREAMING_SNAKE_CASE ( self : str ):
        """Create the model tester and a ConfigTester for SwinvaConfig."""
        lowerCAmelCase__ = SwinvaModelTester(self )
        lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ , embed_dim=37 )

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Run the full battery of common config round-trip checks."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Base model forward-shape test."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__magic_name__ )

    @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Skipped: known PyTorch 2.0.0 CUDA failure."""
        pass

    @unittest.skip(reason="Swinv2 does not use inputs_embeds" )
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Skipped: Swinv2 has no inputs_embeds path."""
        pass

    def __SCREAMING_SNAKE_CASE ( self : Any ):
        """Input embeddings are a module; output embeddings are Linear or None."""
        lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = model_class(__magic_name__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCAmelCase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )

    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """forward() takes pixel_values as its first argument for every class."""
        lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = model_class(__magic_name__ )
            lowerCAmelCase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ = [*signature.parameters.keys()]
            lowerCAmelCase__ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Any ):
        """Attention outputs: count per layer, window-sized shapes, output order."""
        lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ = True
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = True
            lowerCAmelCase__ = False
            lowerCAmelCase__ = True
            lowerCAmelCase__ = model_class(__magic_name__ )
            model.to(__magic_name__ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
            lowerCAmelCase__ = outputs.attentions
            lowerCAmelCase__ = len(self.model_tester.depths )
            self.assertEqual(len(__magic_name__ ) , __magic_name__ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCAmelCase__ = True
            lowerCAmelCase__ = config.window_size**2
            lowerCAmelCase__ = model_class(__magic_name__ )
            model.to(__magic_name__ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
            lowerCAmelCase__ = outputs.attentions
            self.assertEqual(len(__magic_name__ ) , __magic_name__ )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            lowerCAmelCase__ = len(__magic_name__ )
            # Check attention is always last and order is fine
            lowerCAmelCase__ = True
            lowerCAmelCase__ = True
            lowerCAmelCase__ = model_class(__magic_name__ )
            model.to(__magic_name__ )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
            if hasattr(self.model_tester , "num_hidden_states_types" ):
                lowerCAmelCase__ = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                lowerCAmelCase__ = 2
            self.assertEqual(out_len + added_hidden_states , len(__magic_name__ ) )
            lowerCAmelCase__ = outputs.attentions
            self.assertEqual(len(__magic_name__ ) , __magic_name__ )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple ):
        """Shared checker for hidden_states and reshaped_hidden_states shapes."""
        lowerCAmelCase__ = model_class(__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        with torch.no_grad():
            lowerCAmelCase__ = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
        lowerCAmelCase__ = outputs.hidden_states
        lowerCAmelCase__ = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(__magic_name__ ) , __magic_name__ )
        # Swinv2 has a different seq_length
        lowerCAmelCase__ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        lowerCAmelCase__ = outputs.reshaped_hidden_states
        self.assertEqual(len(__magic_name__ ) , __magic_name__ )
        lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = reshaped_hidden_states[0].shape
        # Flatten (B, C, H, W) back to (B, H*W, C) to compare with hidden_states.
        lowerCAmelCase__ = (
            reshaped_hidden_states[0].view(__magic_name__ , __magic_name__ , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def __SCREAMING_SNAKE_CASE ( self : List[str] ):
        """hidden_states via kwarg and via config both produce correct shapes."""
        lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = True
            self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ = True
            self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        """Same hidden-state checks with an image size padded up to the patch grid."""
        lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ = 3
        lowerCAmelCase__ = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCAmelCase__ = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = True
            self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ = True
            self.check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ , (padded_height, padded_width) )

    def __SCREAMING_SNAKE_CASE ( self : Any ):
        """Masked-image-modeling head shape test."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Any ):
        """Image-classification head shape test."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__magic_name__ )

    @slow
    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """First published checkpoint loads successfully."""
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ = SwinvaModel.from_pretrained(__magic_name__ )
            self.assertIsNotNone(__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """With zeroed-init config, all trainable params are exactly 0 or 1."""
        lowerCAmelCase__ ,lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ = _config_zero_init(__magic_name__ )
        for model_class in self.all_model_classes:
            lowerCAmelCase__ = model_class(config=__magic_name__ )
            for name, param in model.named_parameters():
                # embeddings / logit_scale keep non-trivial initializers by design
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )


# End-to-end inference against the published tiny checkpoint.
@require_vision
@require_torch
class A ( unittest.TestCase ):
    @cached_property
    def __SCREAMING_SNAKE_CASE ( self : Dict ):
        """Image processor for the tiny Swinv2 checkpoint (None without vision deps)."""
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
            if is_vision_available()
            else None
        )

    @slow
    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """Classification on the COCO cats fixture reproduces stored logits."""
        lowerCAmelCase__ = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
            __magic_name__ )
        lowerCAmelCase__ = self.default_image_processor
        lowerCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        lowerCAmelCase__ = image_processor(images=__magic_name__ , return_tensors="pt" ).to(__magic_name__ )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ = model(**__magic_name__ )
        # verify the logits
        lowerCAmelCase__ = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , __magic_name__ )
        lowerCAmelCase__ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__magic_name__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
48
'''Configuration class for the timm-backbone wrapper model.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase__ : int = logging.get_logger(__name__)


class A ( SCREAMING_SNAKE_CASE__ ):
    """Config for wrapping a `timm` backbone behind the transformers API.

    NOTE(review): this block appears machine-obfuscated. Every parameter of
    `__init__` shares the name `__magic_name__`, which is a SyntaxError
    (duplicate argument name), and the right-hand names read in the body
    (`backbone`, `num_channels`, ...) are never bound in this scope — the
    original `self.<attr> = <param>` assignments were presumably destroyed.
    TODO: restore distinct parameter names and attribute assignments.
    """

    # Model-type string used by the auto-config registry.
    snake_case__ :Any = 'timm_backbone'

    def __init__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=True , __magic_name__ : str=True , __magic_name__ : List[Any]=None , **__magic_name__ : Tuple , ):
        """Build the config; remaining keyword arguments go to the base class."""
        super().__init__(**__magic_name__ )
        # NOTE(review): the names on the right are unresolved here (see class note).
        lowerCAmelCase__ = backbone              # presumably the timm backbone identifier — TODO confirm
        lowerCAmelCase__ = num_channels          # presumably number of input channels — TODO confirm
        lowerCAmelCase__ = features_only
        lowerCAmelCase__ = use_pretrained_backbone
        lowerCAmelCase__ = True
        # Default to the last feature stage when no explicit indices are given.
        lowerCAmelCase__ = out_indices if out_indices is not None else (-1,)
48
1
'''Lazy-import package init for the ViViT (Video Vision Transformer) model.'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Import structure consumed by `_LazyModule`: module name -> public symbols.
UpperCAmelCase__ : Optional[int] = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}

# The image processor requires the optional vision dependencies (PIL, ...).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): obfuscation rebinds a throwaway name instead of extending the
    # structure above — presumably `_import_structure["image_processing_vivit"] = ...`.
    UpperCAmelCase__ : List[Any] = ["VivitImageProcessor"]

# The modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ : List[str] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # At runtime the module is replaced by a lazy proxy that imports submodules
    # on first attribute access.
    # NOTE(review): `_import_structure` is never bound under that name in this
    # obfuscated version (see notes above) — this line would raise NameError. TODO confirm.
    UpperCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
48
'''Agent tool that generates an English caption for an image using BLIP.'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class A ( SCREAMING_SNAKE_CASE__ ):
    """Image-captioning tool built on the `PipelineTool` base class.

    The base class drives the pipeline through the three stage methods below
    (pre-process, generate, post-process).

    NOTE(review): all three stage methods share the obfuscated name
    `__SCREAMING_SNAKE_CASE`, so later defs shadow earlier ones — presumably
    originally `encode` / `forward` / `decode`. TODO: restore.
    """

    # Default checkpoint loaded when none is supplied.
    snake_case__ :Tuple = 'Salesforce/blip-image-captioning-base'
    # Natural-language description exposed to the agent.
    snake_case__ :List[Any] = (
        'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
        'image to caption, and returns a text that contains the description in English.'
    )
    snake_case__ :List[Any] = 'image_captioner'
    snake_case__ :Optional[int] = AutoModelForVisionaSeq
    # Input/output modalities declared to the tool framework.
    snake_case__ :Optional[int] = ['image']
    snake_case__ :Any = ['text']

    def __init__( self : str , *__magic_name__ : List[str] , **__magic_name__ : Tuple ):
        """Check the optional vision backend is installed before base-class setup."""
        requires_backends(self , ["vision"] )
        super().__init__(*__magic_name__ , **__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : "Image" ):
        """Pre-process: turn the PIL image into model input tensors."""
        return self.pre_processor(images=__magic_name__ , return_tensors="pt" )

    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Tuple ):
        """Forward: autoregressively generate caption token ids."""
        return self.model.generate(**__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Optional[int] ):
        """Post-process: decode generated ids to a clean caption string."""
        return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0].strip()
48
1
'''Binary insertion sort: insertion sort with a binary search for the insert slot.'''


def binary_insertion_sort(collection: list) -> list:
    """Sort *collection* in place (ascending) and return it.

    For each element, binary-search its insertion point in the already-sorted
    prefix, then shift the larger elements one slot right.

    Args:
        collection: a mutable list of mutually comparable items.

    Returns:
        The same list object, sorted in ascending order.
    """
    length = len(collection)
    for i in range(1, length):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search: find the leftmost slot `low` where `val` belongs
        # within the sorted prefix collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open slot `low`, then drop `val` in.
        # (The original obfuscated version lost these element stores and the
        # function name the driver script calls — both restored here.)
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


# Backward-compatible alias for the old (obfuscated) name.
A = binary_insertion_sort


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
48
'''SentencePiece tokenizer for mBART-50 with fairseq-aligned ids and language codes.'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


UpperCAmelCase__ : Tuple = logging.get_logger(__name__)

# SentencePiece word-start marker.
UpperCAmelCase__ : Union[str, Any] = "▁"

UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}

UpperCAmelCase__ : Union[str, Any] = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

UpperCAmelCase__ : Optional[Any] = {
    "facebook/mbart-large-50-one-to-many-mmt": 10_24,
}

# fmt: off
# 52 fairseq language codes appended after the SentencePiece vocabulary.
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]


class A ( SCREAMING_SNAKE_CASE__ ):
    """mBART-50 slow tokenizer (SentencePiece-based).

    NOTE(review): this block is machine-obfuscated. Parameters share the name
    `__magic_name__` (a SyntaxError via duplicate argument names), the
    `self.<attr> = ...` targets were replaced by a throwaway local, and all
    methods share the name `__SCREAMING_SNAKE_CASE` so later defs shadow
    earlier ones. Inferred original method names are noted per method; the
    code itself is left byte-identical.
    """

    snake_case__ :Optional[int] = VOCAB_FILES_NAMES
    snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ :Tuple = ['input_ids', 'attention_mask']
    # Tokens prepended / appended around every encoded sequence.
    snake_case__ :List[int] = []
    snake_case__ :List[int] = []

    def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ):
        """Load the SentencePiece model and build the fairseq-aligned id tables."""
        # Mask token behaves like a normal word (includes the preceding space).
        lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
        lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
        # Make sure every fairseq language code is registered as a special token.
        lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
        lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(__magic_name__ ) )
        lowerCAmelCase__ = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = len(self.sp_model )
        # Language codes sit after the spm vocab (plus the fairseq offset).
        lowerCAmelCase__ = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ )
        }
        lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
        lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX"
        lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
        lowerCAmelCase__ = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    @property
    def __SCREAMING_SNAKE_CASE ( self : int ):
        """vocab_size: spm vocab + language codes + fairseq offset (+1 for <mask>)."""
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def __SCREAMING_SNAKE_CASE ( self : Dict ):
        """src_lang getter."""
        return self._src_lang

    @src_lang.setter
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
        """src_lang setter: also refreshes the special-token prefix/suffix."""
        lowerCAmelCase__ = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    def __getstate__( self : Dict ):
        """Drop the (unpicklable) SentencePiece processor before pickling."""
        lowerCAmelCase__ = self.__dict__.copy()
        lowerCAmelCase__ = None
        return state

    def __setstate__( self : List[Any] , __magic_name__ : Dict ):
        """Restore state and reload the SentencePiece model from `vocab_file`."""
        lowerCAmelCase__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            lowerCAmelCase__ = {}
        lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """get_vocab: full token -> id mapping, including added tokens."""
        lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ):
        """_tokenize: SentencePiece encode to string pieces."""
        return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
        """_convert_token_to_id with fairseq-aligned special tokens."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ):
        """_convert_id_to_token: inverse of the mapping above."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ):
        """convert_tokens_to_string: decode pieces, keeping special tokens verbatim."""
        lowerCAmelCase__ = []
        lowerCAmelCase__ = ""
        lowerCAmelCase__ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__magic_name__ ) + token
                lowerCAmelCase__ = True
                lowerCAmelCase__ = []
            else:
                current_sub_tokens.append(__magic_name__ )
                lowerCAmelCase__ = False
        out_string += self.sp_model.decode(__magic_name__ )
        return out_string.strip()

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
        """save_vocabulary: copy (or serialize) the spm model into `save_directory`."""
        if not os.path.isdir(__magic_name__ ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowerCAmelCase__ = os.path.join(
            __magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __magic_name__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(__magic_name__ , "wb" ) as fi:
                lowerCAmelCase__ = self.sp_model.serialized_model_proto()
                fi.write(__magic_name__ )
        return (out_vocab_file,)

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
        """get_special_tokens_mask: 1 for special tokens, 0 for sequence tokens.

        NOTE(review): the `super()` call repeats the keyword `token_ids_a=` —
        a SyntaxError introduced by the obfuscation (originally `token_ids_0`
        and `token_ids_1`). TODO: restore.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
        lowerCAmelCase__ = [1] * len(self.prefix_tokens )
        lowerCAmelCase__ = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
        return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
        """build_inputs_with_special_tokens: [src_lang] tokens [eos] layout."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ):
        """_build_translation_inputs: encode text and attach forced BOS for tgt_lang."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        lowerCAmelCase__ = src_lang
        lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
        lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ )
        lowerCAmelCase__ = tgt_lang_id
        return inputs

    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Union[str, Any] , ):
        """prepare_seq2seq_batch: set languages then delegate to the base class."""
        lowerCAmelCase__ = src_lang
        lowerCAmelCase__ = tgt_lang
        return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : str ):
        """_switch_to_input_mode."""
        return self.set_src_lang_special_tokens(self.src_lang )

    def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
        """_switch_to_target_mode."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
        """set_src_lang_special_tokens: prefix=[src_lang_code], suffix=[eos]."""
        lowerCAmelCase__ = self.lang_code_to_id[src_lang]
        lowerCAmelCase__ = [self.cur_lang_code_id]
        lowerCAmelCase__ = [self.eos_token_id]

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
        """set_tgt_lang_special_tokens: prefix=[tgt_lang_code], suffix=[eos]."""
        lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
        lowerCAmelCase__ = [self.cur_lang_code_id]
        lowerCAmelCase__ = [self.eos_token_id]
48
1
'''Fast (tokenizers-backed) tokenizer for MobileBERT.'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer


UpperCAmelCase__ : str = logging.get_logger(__name__)

UpperCAmelCase__ : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

UpperCAmelCase__ : Any = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

UpperCAmelCase__ : Optional[Any] = {"mobilebert-uncased": 5_12}

UpperCAmelCase__ : Dict = {}


class A ( SCREAMING_SNAKE_CASE__ ):
    """Fast MobileBERT tokenizer (WordPiece, BERT-style special tokens).

    NOTE(review): machine-obfuscated — duplicate `__magic_name__` parameter
    names are SyntaxErrors, attribute-assignment targets were replaced by a
    throwaway local, and the three methods share one name (presumably
    `build_inputs_with_special_tokens`, `create_token_type_ids_from_sequences`
    and `save_vocabulary`). Code kept byte-identical.
    """

    snake_case__ :str = VOCAB_FILES_NAMES
    snake_case__ :List[Any] = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ :Optional[int] = PRETRAINED_INIT_CONFIGURATION
    snake_case__ :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Slow-tokenizer class used for conversion.
    snake_case__ :Union[str, Any] = MobileBertTokenizer

    def __init__( self : List[Any] , __magic_name__ : str=None , __magic_name__ : List[Any]=None , __magic_name__ : List[Any]=True , __magic_name__ : Tuple="[UNK]" , __magic_name__ : int="[SEP]" , __magic_name__ : List[str]="[PAD]" , __magic_name__ : List[str]="[CLS]" , __magic_name__ : int="[MASK]" , __magic_name__ : Dict=True , __magic_name__ : Any=None , **__magic_name__ : List[str] , ):
        """Initialize, then re-sync the backend normalizer with the requested options."""
        super().__init__(
            __magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , **__magic_name__ , )
        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the caller's settings.
        lowerCAmelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , __magic_name__ ) != do_lower_case
            or normalizer_state.get("strip_accents" , __magic_name__ ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , __magic_name__ ) != tokenize_chinese_chars
        ):
            lowerCAmelCase__ = getattr(__magic_name__ , normalizer_state.pop("type" ) )
            lowerCAmelCase__ = do_lower_case
            lowerCAmelCase__ = strip_accents
            lowerCAmelCase__ = tokenize_chinese_chars
            lowerCAmelCase__ = normalizer_class(**__magic_name__ )
        lowerCAmelCase__ = do_lower_case

    def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any]=None ):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` input ids."""
        lowerCAmelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
        """Token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        lowerCAmelCase__ = [self.sep_token_id]
        lowerCAmelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
        """Save the WordPiece vocabulary files; returns the written paths."""
        lowerCAmelCase__ = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
        return tuple(__magic_name__ )
48
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase__ = 0 if start < end: lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = a[end] lowerCAmelCase__ = a[pivot] lowerCAmelCase__ = temp lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 ) count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ ) return count def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict: '''simple docstring''' lowerCAmelCase__ = 0 lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = a[end] lowerCAmelCase__ = a[pivot] lowerCAmelCase__ = temp lowerCAmelCase__ = start - 1 for index in range(UpperCamelCase_ , UpperCamelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ = new_pivot_index + 1 lowerCAmelCase__ = a[new_pivot_index] lowerCAmelCase__ = a[index] lowerCAmelCase__ = temp lowerCAmelCase__ = a[new_pivot_index + 1] lowerCAmelCase__ = a[end] lowerCAmelCase__ = temp return new_pivot_index + 1, count UpperCAmelCase__ : Tuple = TemporaryFile() UpperCAmelCase__ : List[str] = 1_00 # 1000 elements are to be sorted UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p) np.save(outfile, X) print("The array is") print(X) outfile.seek(0) # using the same array UpperCAmelCase__ : Optional[Any] = np.load(outfile) UpperCAmelCase__ : Any = len(M) - 1 UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution" "is :" ) print(z)
48
1
'''Rail-fence (zigzag) transposition cipher: encrypt, decrypt and brute force.'''


def encrypt(input_string: str, key: int) -> str:
    """Encrypt *input_string* with a rail fence of height *key*.

    Characters are written down the rails in a zigzag and read off row by row.
    (The obfuscated original appended the wrong variable to the grid and had
    lost the `min()` arguments, and all three functions were named `A` while
    being called under these names — restored here.)

    Raises:
        ValueError: if key <= 0.
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    return "".join(grid)


def decrypt(input_string: str, key: int) -> str:
    """Invert `encrypt`: rebuild the rail layout, then read in zigzag order.

    Raises:
        ValueError: if key <= 0.
    """
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    # First pass: mark how many characters land on each rail.
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    # Second pass: fill each rail with the right slice of ciphertext.
    counter = 0
    for row in temp_grid:
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    # Third pass: read the rails back in zigzag order.
    output_string = ""
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Try every key from 1 to len(input_string)-1; return {key: decryption}."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
48
'''Convert original-repository GroupViT checkpoints to the HF transformers format.

NOTE(review): machine-obfuscated — all four functions are named `A` (later
defs shadow earlier ones) while the call sites use the inferred names noted
per function, and the names read in the bodies (`name`, `orig_state_dict`,
...) are never bound because the obfuscation replaced both parameter names
and assignment targets. Code kept byte-identical; comments only.
'''
import argparse

import requests
import torch
from PIL import Image

from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel


def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
    '''rename_key: map one original state-dict key to its HF-format key.'''
    # vision encoder
    if "img_encoder.pos_embed" in name:
        lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
    if "img_encoder.patch_embed.proj" in name:
        lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
    if "img_encoder.patch_embed.norm" in name:
        lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
    if "img_encoder.layers" in name:
        lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
    if "blocks" in name and "res" not in name:
        lowerCAmelCase__ = name.replace("blocks" , "layers" )
    if "attn" in name and "pre_assign" not in name:
        lowerCAmelCase__ = name.replace("attn" , "self_attn" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        lowerCAmelCase__ = name.replace("proj" , "out_proj" )
    if "pre_assign_attn.attn.proj" in name:
        lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
    if "norm1" in name:
        lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
    if "norm2" in name and "pre_assign" not in name:
        lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
    if "img_encoder.norm" in name:
        lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
    if "text_encoder.positional_embedding" in name:
        lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
    if "text_encoder.transformer.resblocks." in name:
        lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
    if "ln_1" in name:
        lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
    if "ln_2" in name:
        lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
    if "c_fc" in name:
        lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
    if "c_proj" in name:
        lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
    if "text_encoder" in name:
        lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
    if "ln_final" in name:
        lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
    if "img_projector.linear_out." in name:
        lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
    if "text_projector.linear_hidden" in name:
        lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
    if "text_projector.linear_out" in name:
        lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
    return name


def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]:
    '''convert_state_dict: rename keys and split fused qkv / in_proj tensors.'''
    for key in orig_state_dict.copy().keys():
        lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            lowerCAmelCase__ = key.split("." )
            lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
            lowerCAmelCase__ = config.vision_config.hidden_size
            if "weight" in key:
                # rows 0:dim -> q, dim:2*dim -> k, last dim -> v
                lowerCAmelCase__ = val[:dim, :]
                lowerCAmelCase__ = val[dim : dim * 2, :]
                lowerCAmelCase__ = val[-dim:, :]
            else:
                lowerCAmelCase__ = val[:dim]
                lowerCAmelCase__ = val[dim : dim * 2]
                lowerCAmelCase__ = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            lowerCAmelCase__ = key.split("." )
            lowerCAmelCase__ = int(key_split[3] )
            lowerCAmelCase__ = config.text_config.hidden_size
            if "weight" in key:
                lowerCAmelCase__ = val[:dim, :]
                lowerCAmelCase__ = val[
                    dim : dim * 2, :
                ]
                lowerCAmelCase__ = val[-dim:, :]
            else:
                lowerCAmelCase__ = val[:dim]
                lowerCAmelCase__ = val[dim : dim * 2]
                lowerCAmelCase__ = val[-dim:]
        else:
            lowerCAmelCase__ = rename_key(UpperCamelCase_ )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                lowerCAmelCase__ = val.squeeze_()
            else:
                lowerCAmelCase__ = val
    return orig_state_dict


def A ( ) -> Optional[int]:
    '''prepare_img: download the standard COCO cats test image.'''
    lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
    return im


@torch.no_grad()
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any:
    '''convert_groupvit_checkpoint: load, convert, verify logits, save/push.'''
    lowerCAmelCase__ = GroupViTConfig()
    lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval()
    lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
    lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
    lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
    # position_ids is generated, multi_label_logit_scale is intentionally dropped
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0)

    # verify result
    lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    lowerCAmelCase__ = prepare_img()
    lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
    with torch.no_grad():
        lowerCAmelCase__ = model(**UpperCamelCase_ )
    # Expected image-text logits per released checkpoint.
    if model_name == "groupvit-gcc-yfcc":
        lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] )
    elif model_name == "groupvit-gcc-redcaps":
        lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] )
    else:
        raise ValueError(F"""Model name {model_name} not supported.""" )
    assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )

    processor.save_pretrained(UpperCamelCase_ )
    model.save_pretrained(UpperCamelCase_ )
    print("Successfully saved processor and model to" , UpperCamelCase_ )
    if push_to_hub:
        print("Pushing to the hub..." )
        processor.push_to_hub(UpperCamelCase_ , organization="nielsr" )
        model.push_to_hub(UpperCamelCase_ , organization="nielsr" )


if __name__ == "__main__":
    UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gccy-fcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    UpperCAmelCase__ : Any = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
1
"""Project Euler 21: sum of all amicable numbers below a limit."""
from math import isqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (divisors excluding ``n``).

    Uses ``math.isqrt`` so the divisor scan is exact integer arithmetic,
    avoiding float-precision issues of ``sqrt`` for large ``n``.
    """
    total = 0
    for i in range(1, isqrt(n) + 1):
        if n % i == 0:
            total += i
            partner = n // i
            # Do not count the square root twice for perfect squares.
            if partner != i:
                total += partner
    # Subtract n itself: we only want *proper* divisors.
    return total - n


def solution(limit: int = 10_000) -> int:
    """Return the sum of all amicable numbers strictly below ``limit``.

    A number ``i`` is amicable when sum_of_divisors(sum_of_divisors(i)) == i
    while sum_of_divisors(i) != i (this excludes perfect numbers).
    """
    return sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
48
'''simple docstring''' from __future__ import annotations from functools import lru_cache from math import ceil UpperCAmelCase__ : Optional[Any] = 1_00 UpperCAmelCase__ : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) UpperCAmelCase__ : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=1_00 ) def A ( UpperCamelCase_ : int ) -> set[int]: '''simple docstring''' if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} lowerCAmelCase__ = set() lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A ( UpperCamelCase_ : int = 50_00 ) -> int | None: '''simple docstring''' for number_to_partition in range(1 , UpperCamelCase_ ): if len(partition(UpperCamelCase_ ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F"{solution() = }")
48
1
"""Compute the complementary strand of a DNA sequence."""
import re


def dna(dna: str) -> str:
    """Return the base-paired complement of ``dna`` (A<->T, C<->G).

    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'

    Raises:
        ValueError: if the strand contains anything but uppercase A, T, C, G.
    """
    # Every character must match [ATCG]; otherwise the strand is invalid.
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
48
"""Character-level tokenizer for MGP-STR (scene-text recognition)."""
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Tokenizes a string into its individual characters using a JSON vocab.

    Args:
        vocab_file: Path to the JSON vocabulary mapping characters to ids.
        unk_token / bos_token / eos_token / pad_token: Special tokens; MGP-STR
            uses "[GO]" for unk/bos/pad and "[s]" for eos.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping id -> character for decoding.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocabulary including added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split ``text`` into a list of single characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a character to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its character (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary JSON into ``save_directory`` and return its path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
48
1
"""Rotate a square matrix by 90, 180 or 270 degrees counterclockwise."""
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2.

    Negative sizes are treated as their absolute value; 0 falls back to 4.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise (90 clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return a new matrix with rows and columns swapped."""
    return [list(x) for x in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with its rows in reverse order."""
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with each row reversed."""
    return [x[::-1] for x in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    """Pretty-print the matrix, one row per line."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
48
"""Project Euler 21: sum of all amicable numbers below a limit."""
from math import isqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (divisors excluding ``n``).

    Uses ``math.isqrt`` so the divisor scan is exact integer arithmetic,
    avoiding float-precision issues of ``sqrt`` for large ``n``.
    """
    total = 0
    for i in range(1, isqrt(n) + 1):
        if n % i == 0:
            total += i
            partner = n // i
            # Do not count the square root twice for perfect squares.
            if partner != i:
                total += partner
    # Subtract n itself: we only want *proper* divisors.
    return total - n


def solution(limit: int = 10_000) -> int:
    """Return the sum of all amicable numbers strictly below ``limit``.

    A number ``i`` is amicable when sum_of_divisors(sum_of_divisors(i)) == i
    while sum_of_divisors(i) != i (this excludes perfect numbers).
    """
    return sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
48
1
"""Train and visualise an XGBoost classifier on the Iris dataset."""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    """Split a scikit-learn "bunch" dict into (features, targets)."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """Fit an XGBoost classifier on the given features/targets and return it."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """Load Iris, train the classifier and plot its confusion matrix."""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
48
'''simple docstring''' import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray: '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' lowerCAmelCase__ = np.nan for i in range(UpperCamelCase_ ): lowerCAmelCase__ = features[:, labels == i] lowerCAmelCase__ = data.mean(1 ) # Centralize the data of class i lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(UpperCamelCase_ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) return covariance_sum / features.shape[1] def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' lowerCAmelCase__ = features.mean(1 ) lowerCAmelCase__ = np.nan for i in range(UpperCamelCase_ ): lowerCAmelCase__ = features[:, labels == i] lowerCAmelCase__ = data.shape[1] lowerCAmelCase__ = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) lowerCAmelCase__ = device_data * np.dot( column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , ) return covariance_sum / features.shape[1] def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' if features.any(): lowerCAmelCase__ = features.mean(1 ) # Center the dataset lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) ) lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1] lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ ) # Take all the columns in the reverse order (-1), and then takes only the first lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ ) logging.info("Principal Component Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ ) logging.error("Dataset empty" ) raise AssertionError def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: lowerCAmelCase__ ,lowerCAmelCase__ = eigh( covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , ) lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions] lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ ) lowerCAmelCase__ = svd_matrix[:, 0:dimensions] lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ ) logging.info("Linear Discriminant Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , 
force=UpperCamelCase_ ) logging.error("Dataset empty" ) raise AssertionError def A ( ) -> None: '''simple docstring''' lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] ) lowerCAmelCase__ = 2 lowerCAmelCase__ = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(UpperCamelCase_ ) as error_info: lowerCAmelCase__ = linear_discriminant_analysis( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if isinstance(UpperCamelCase_ , np.ndarray ): raise AssertionError( "Did not raise AssertionError for dimensions > classes" ) assert error_info.type is AssertionError def A ( ) -> None: '''simple docstring''' lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowerCAmelCase__ = 2 lowerCAmelCase__ = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] ) with pytest.raises(UpperCamelCase_ ) as error_info: lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ ) if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
48
1
"""Tests for the standalone TrOCR decoder and its causal-LM head."""
import unittest

from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM


@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds tiny TrOCR decoder configs/inputs for the common model tests."""

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) for one test run."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        """Check that cached (past_key_values) decoding matches full decoding."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(
    ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
48
"""Full-justify text to a fixed width (LeetCode 68, "Text Justification")."""


def text_justification(word: str, max_width: int) -> list:
    """Greedily pack words into lines of exactly ``max_width`` characters.

    Inner lines are fully justified with extra spaces distributed round-robin
    starting from the left; the final line is left-justified and padded with
    trailing spaces.
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without spaces)
            # len(line) = number of gaps needed between the words gathered so far
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    # Last line: left-justified, padded with trailing spaces up to max_width.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
48
1
"""Small torch/matplotlib utilities (device pick, freezing, display, timestamps)."""
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_module(module) -> None:
    """Disable gradient tracking for every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device: "cuda", "mps" or "cpu"."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img) -> None:
    """Display an image with matplotlib, hiding both axes."""
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current wall-clock time formatted as "HH:MM:SS"."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
48
"""Fixture dataclasses and helpers for the HfArgumentParser test suite."""
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field(default=None, metadata=None):
    """Dataclass field whose (mutable) default list is built per instance."""
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        # Coerce the raw string default into the enum type.
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        # Coerce the raw string default into the enum type.
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass class A : snake_case__ :List[int] = list_field(default=[] ) snake_case__ :List[int] = list_field(default=[1, 2, 3] ) snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class A : snake_case__ :List[int] = field() snake_case__ :str = field() snake_case__ :BasicEnum = field() def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = BasicEnum(self.required_enum ) @dataclass class A : snake_case__ :int snake_case__ :"BasicEnum" = field() snake_case__ :"Optional[bool]" = None snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} ) snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class A : snake_case__ :bool = False snake_case__ :bool = True snake_case__ :bool | None = None @dataclass class A : snake_case__ :int | None = None snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} ) snake_case__ :str | None = None snake_case__ :list[str] | None = list_field(default=[] ) snake_case__ :list[int] | None = list_field(default=[] ) class A ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"} lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) ) 
del xx["type"], yy["type"] self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] ((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ ) self.assertFalse(example.flag ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , default=42 , type=__magic_name__ ) expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" ) expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" ) expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ ) lowerCAmelCase__ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__magic_name__ ) for dataclass_type in dataclass_types: lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo 
, MixedTypeEnum.toto ) lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowerCAmelCase__ = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" @dataclass class A : snake_case__ :Literal["titi", "toto", 42] = "toto" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) lowerCAmelCase__ = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ ) expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ ) expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual( __magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , ) lowerCAmelCase__ = parser.parse_args("--foo_int 1 
--bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() ) self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ ) expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" ) expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ ) expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ ) expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ ) lowerCAmelCase__ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__magic_name__ ) for dataclass_type in dataclass_types: lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) ) lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() ) self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" 
lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , ) expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ ) expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" ) os.mkdir(__magic_name__ ) with open(temp_local_path + ".json" , "w+" ) as f: json.dump(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self 
: str ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" ) os.mkdir(__magic_name__ ) with open(temp_local_path + ".yaml" , "w+" ) as f: yaml.dump(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.assertIsNotNone(__magic_name__ )
48
1
"""Convert a timm Swin-V2 checkpoint to the Hugging Face Swinv2 format.

NOTE(review): automated renaming collapsed all function names to ``A`` (so
each definition shadows the previous one) and replaced most assignment
targets with the throwaway ``lowerCAmelCase__`` while later lines still
reference the original (now undefined) names such as ``swinva_name``,
``name_split``, ``config``, ``name`` and ``val``.  Code is kept
byte-identical; comments describe the apparent intent only.
"""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification


def A(UpperCamelCase_: int) -> Optional[Any]:
    """Build a SwinvaConfig by parsing the timm model name.

    Derives image size, window size, model size (tiny/small/base/large)
    from the underscore-separated name, then picks embed dim, depths and
    head counts per size, and attaches the ImageNet-1k or -22k label maps
    downloaded from the huggingface/label-files dataset repo.
    """
    lowerCAmelCase__ = SwinvaConfig()
    lowerCAmelCase__ = swinva_name.split("_")
    lowerCAmelCase__ = name_split[1]
    # "to" marks fine-tuned variants whose name embeds two resolutions.
    if "to" in name_split[3]:
        lowerCAmelCase__ = int(name_split[3][-3:])
    else:
        lowerCAmelCase__ = int(name_split[3])
    if "to" in name_split[2]:
        lowerCAmelCase__ = int(name_split[2][-2:])
    else:
        lowerCAmelCase__ = int(name_split[2][6:])
    if model_size == "tiny":
        lowerCAmelCase__ = 96
        lowerCAmelCase__ = (2, 2, 6, 2)
        lowerCAmelCase__ = (3, 6, 12, 24)
    elif model_size == "small":
        lowerCAmelCase__ = 96
        lowerCAmelCase__ = (2, 2, 18, 2)
        lowerCAmelCase__ = (3, 6, 12, 24)
    elif model_size == "base":
        lowerCAmelCase__ = 1_28
        lowerCAmelCase__ = (2, 2, 18, 2)
        lowerCAmelCase__ = (4, 8, 16, 32)
    else:
        lowerCAmelCase__ = 1_92
        lowerCAmelCase__ = (2, 2, 18, 2)
        lowerCAmelCase__ = (6, 12, 24, 48)
    if "to" in swinva_name:
        lowerCAmelCase__ = (12, 12, 12, 6)
    # 22k-pretrained (not fine-tuned) checkpoints use the 21841-class head.
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        lowerCAmelCase__ = 2_18_41
        lowerCAmelCase__ = "huggingface/label-files"
        lowerCAmelCase__ = "imagenet-22k-id2label.json"
        lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_, UpperCamelCase_, repo_type="dataset"), "r"))
        lowerCAmelCase__ = {int(UpperCamelCase_): v for k, v in idalabel.items()}
        lowerCAmelCase__ = idalabel
        lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
    else:
        lowerCAmelCase__ = 10_00
        lowerCAmelCase__ = "huggingface/label-files"
        lowerCAmelCase__ = "imagenet-1k-id2label.json"
        lowerCAmelCase__ = json.load(open(hf_hub_download(UpperCamelCase_, UpperCamelCase_, repo_type="dataset"), "r"))
        lowerCAmelCase__ = {int(UpperCamelCase_): v for k, v in idalabel.items()}
        lowerCAmelCase__ = idalabel
        lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
    lowerCAmelCase__ = img_size
    lowerCAmelCase__ = num_classes
    lowerCAmelCase__ = embed_dim
    lowerCAmelCase__ = depths
    lowerCAmelCase__ = num_heads
    lowerCAmelCase__ = window_size
    return config


def A(UpperCamelCase_: Dict) -> int:
    """Translate a single timm state-dict key into the HF Swinv2 naming."""
    if "patch_embed.proj" in name:
        lowerCAmelCase__ = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        lowerCAmelCase__ = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        lowerCAmelCase__ = "encoder." + name
    if "attn.proj" in name:
        lowerCAmelCase__ = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        lowerCAmelCase__ = name.replace("attn", "attention.self")
    if "norm1" in name:
        lowerCAmelCase__ = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        lowerCAmelCase__ = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        lowerCAmelCase__ = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        lowerCAmelCase__ = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        lowerCAmelCase__ = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        lowerCAmelCase__ = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        lowerCAmelCase__ = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        lowerCAmelCase__ = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        lowerCAmelCase__ = "layernorm.weight"
    if name == "norm.bias":
        lowerCAmelCase__ = "layernorm.bias"
    if "head" in name:
        lowerCAmelCase__ = name.replace("head", "classifier")
    else:
        lowerCAmelCase__ = "swinv2." + name
    return name


def A(UpperCamelCase_: List[Any], UpperCamelCase_: Optional[int]) -> List[str]:
    """Rename keys and split fused qkv weights/biases into q, k and v."""
    for key in orig_state_dict.copy().keys():
        lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_)
        if "mask" in key:
            # Relative-position masks are not part of the HF checkpoint.
            continue
        elif "qkv" in key:
            lowerCAmelCase__ = key.split(".")
            lowerCAmelCase__ = int(key_split[1])
            lowerCAmelCase__ = int(key_split[3])
            lowerCAmelCase__ = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            # Fused tensor layout is [q; k; v] along dim 0.
            if "weight" in key:
                lowerCAmelCase__ = val[:dim, :]
                lowerCAmelCase__ = val[dim : dim * 2, :]
                lowerCAmelCase__ = val[-dim:, :]
            else:
                lowerCAmelCase__ = val[:dim]
                lowerCAmelCase__ = val[dim : dim * 2]
                lowerCAmelCase__ = val[-dim:]
        else:
            lowerCAmelCase__ = val
    return orig_state_dict


def A(UpperCamelCase_: Optional[int], UpperCamelCase_: Any) -> Optional[Any]:
    """Convert, numerically verify against timm, save and push the model."""
    lowerCAmelCase__ = timm.create_model(UpperCamelCase_, pretrained=UpperCamelCase_)
    timm_model.eval()
    lowerCAmelCase__ = get_swinva_config(UpperCamelCase_)
    lowerCAmelCase__ = SwinvaForImageClassification(UpperCamelCase_)
    model.eval()
    lowerCAmelCase__ = convert_state_dict(timm_model.state_dict(), UpperCamelCase_)
    model.load_state_dict(UpperCamelCase_)
    # Sanity check on the standard COCO cats image.
    lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    lowerCAmelCase__ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-")))
    lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_, stream=UpperCamelCase_).raw)
    lowerCAmelCase__ = image_processor(images=UpperCamelCase_, return_tensors="pt")
    lowerCAmelCase__ = timm_model(inputs["pixel_values"])
    lowerCAmelCase__ = model(**UpperCamelCase_).logits
    assert torch.allclose(UpperCamelCase_, UpperCamelCase_, atol=1E-3)
    print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(UpperCamelCase_)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(UpperCamelCase_)
    model.push_to_hub(
        repo_path_or_name=Path(UpperCamelCase_, UpperCamelCase_),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    UpperCAmelCase__: int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    UpperCAmelCase__: Tuple = parser.parse_args()
    convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
48
"""Prim's minimum-spanning-tree algorithm backed by a positional min-heap.

NOTE(review): automated renaming collapsed the class and function names to
``A`` / ``__SCREAMING_SNAKE_CASE``, duplicated several parameter names
(a SyntaxError), and replaced assignment targets with the throwaway
``lowerCAmelCase__`` while later lines still reference the original names
(``self.node_position``, ``start``, ``heap``, ``positions``, ``temp``,
``visited`` ...).  Code is kept byte-identical; comments describe the
apparent intent only.
"""
import sys
from collections import defaultdict


class A:
    def __init__(self: Any):
        """Create an empty heap; presumably this initialised
        ``self.node_position`` (vertex -> heap index) — confirm."""
        lowerCAmelCase__ = []

    def __SCREAMING_SNAKE_CASE(self: List[str], __magic_name__: List[Any]):
        """Return the heap index currently holding *vertex*."""
        return self.node_position[vertex]

    # NOTE(review): duplicate ``__magic_name__`` parameters below are a
    # SyntaxError introduced by the renaming.
    def __SCREAMING_SNAKE_CASE(self: Tuple, __magic_name__: List[str], __magic_name__: List[str]):
        """Record the heap index of a vertex."""
        lowerCAmelCase__ = pos

    def __SCREAMING_SNAKE_CASE(self: Optional[Any], __magic_name__: int, __magic_name__: Optional[Any], __magic_name__: List[Any], __magic_name__: List[str]):
        """Sift down: restore the min-heap property from ``start`` downward,
        keeping the vertex-position index in sync."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                lowerCAmelCase__ = 2 * start + 1
            else:
                # Pick the smaller of the two children.
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    lowerCAmelCase__ = 2 * start + 1
                else:
                    lowerCAmelCase__ = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                # Swap heap entries and their tracked positions, then recurse.
                lowerCAmelCase__, lowerCAmelCase__ = heap[smallest_child], positions[smallest_child]
                lowerCAmelCase__, lowerCAmelCase__ = (
                    heap[start],
                    positions[start],
                )
                lowerCAmelCase__, lowerCAmelCase__ = temp, tempa
                lowerCAmelCase__ = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], __magic_name__)
                self.top_to_bottom(__magic_name__, __magic_name__, __magic_name__, __magic_name__)

    def __SCREAMING_SNAKE_CASE(self: Dict, __magic_name__: Optional[Any], __magic_name__: Dict, __magic_name__: List[str], __magic_name__: List[str]):
        """Sift up: bubble the value at ``index`` toward the root after a
        decrease-key, updating positions along the way."""
        lowerCAmelCase__ = position[index]
        while index != 0:
            lowerCAmelCase__ = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                lowerCAmelCase__ = heap[parent]
                lowerCAmelCase__ = position[parent]
                self.set_position(position[parent], __magic_name__)
            else:
                lowerCAmelCase__ = val
                lowerCAmelCase__ = temp
                self.set_position(__magic_name__, __magic_name__)
                break
            lowerCAmelCase__ = parent
        else:
            # Reached the root: place the value there.
            lowerCAmelCase__ = val
            lowerCAmelCase__ = temp
            self.set_position(__magic_name__, 0)

    def __SCREAMING_SNAKE_CASE(self: Union[str, Any], __magic_name__: str, __magic_name__: int):
        """Heapify: sift down from the last internal node to the root."""
        lowerCAmelCase__ = len(__magic_name__) // 2 - 1
        for i in range(__magic_name__, -1, -1):
            self.top_to_bottom(__magic_name__, __magic_name__, len(__magic_name__), __magic_name__)

    def __SCREAMING_SNAKE_CASE(self: List[str], __magic_name__: Union[str, Any], __magic_name__: Tuple):
        """Pop the minimum: take the root, mark it infinite, sift down, and
        return the removed vertex."""
        lowerCAmelCase__ = positions[0]
        lowerCAmelCase__ = sys.maxsize
        self.top_to_bottom(__magic_name__, 0, len(__magic_name__), __magic_name__)
        return temp


def A(UpperCamelCase_: List[Any]) -> Union[str, Any]:
    """Run Prim's algorithm over an adjacency list and return the MST edges."""
    lowerCAmelCase__ = Heap()
    lowerCAmelCase__ = [0] * len(UpperCamelCase_)
    lowerCAmelCase__ = [-1] * len(UpperCamelCase_)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    lowerCAmelCase__ = []  # Heap of Distance of vertices from their neighboring vertex
    lowerCAmelCase__ = []
    for vertex in range(len(UpperCamelCase_)):
        distance_tv.append(sys.maxsize)
        positions.append(UpperCamelCase_)
        heap.node_position.append(UpperCamelCase_)
    lowerCAmelCase__ = []
    lowerCAmelCase__ = 1
    lowerCAmelCase__ = sys.maxsize
    # Seed distances from vertex 0, then heapify.
    for neighbor, distance in adjacency_list[0]:
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = distance
    heap.heapify(UpperCamelCase_, UpperCamelCase_)
    for _ in range(1, len(UpperCamelCase_)):
        lowerCAmelCase__ = heap.delete_minimum(UpperCamelCase_, UpperCamelCase_)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            lowerCAmelCase__ = 1
            # Relax edges out of the newly added vertex (decrease-key).
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(UpperCamelCase_)]
                ):
                    lowerCAmelCase__ = distance
                    heap.bottom_to_top(
                        UpperCamelCase_, heap.get_position(UpperCamelCase_), UpperCamelCase_, UpperCamelCase_
                    )
                    lowerCAmelCase__ = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    UpperCAmelCase__: Optional[int] = int(input("Enter number of edges: ").strip())
    UpperCAmelCase__: str = defaultdict(list)
    for _ in range(edges_number):
        UpperCAmelCase__: int = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
48
1
"""Tests for ``datasets`` DatasetInfo / DatasetInfosDict (de)serialization.

NOTE(review): automated renaming collapsed every test function name to
``A`` (so later definitions shadow earlier ones), duplicated parameter
names (a SyntaxError), and replaced assignment targets with the throwaway
``lowerCAmelCase__`` while the assertions still reference the original
names (``dataset_infos_dir``, ``reloaded``, ``dataset_info_yaml_dict``
...).  Code is kept byte-identical; comments describe apparent intent.
"""
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def A(UpperCamelCase_: Tuple, UpperCamelCase_: Optional[Any]) -> List[Any]:
    """DatasetInfosDict.from_directory reads README.md and/or the legacy
    dataset_infos.json, whichever combination is present."""
    lowerCAmelCase__ = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write("{\"default\": {\"dataset_size\": 42}}")
    lowerCAmelCase__ = DatasetInfosDict.from_directory(UpperCamelCase_)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def A(UpperCamelCase_: Optional[Any], UpperCamelCase_: DatasetInfo) -> Optional[int]:
    """write_to_directory / from_directory round-trips a DatasetInfo."""
    lowerCAmelCase__ = str(UpperCamelCase_)
    dataset_info.write_to_directory(UpperCamelCase_)
    lowerCAmelCase__ = DatasetInfo.from_directory(UpperCamelCase_)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(UpperCamelCase_, "dataset_info.json"))


def A() -> List[Any]:
    """_to_yaml_dict keeps exactly the _INCLUDED_INFO_IN_YAML keys and
    survives a yaml dump/load round-trip."""
    lowerCAmelCase__ = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=13_37,
        post_processing_size=4_42,
        dataset_size=12_34,
        size_in_bytes=13_37 + 4_42 + 12_34,
    )
    lowerCAmelCase__ = dataset_info._to_yaml_dict()
    assert sorted(UpperCamelCase_) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    lowerCAmelCase__ = yaml.safe_dump(UpperCamelCase_)
    lowerCAmelCase__ = yaml.safe_load(UpperCamelCase_)
    assert dataset_info_yaml_dict == reloaded


def A() -> str:
    """An empty DatasetInfo serializes to an empty yaml dict."""
    lowerCAmelCase__ = DatasetInfo()
    lowerCAmelCase__ = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=13_37),
            }
        ),
    ],
)
def A(UpperCamelCase_: Dict, UpperCamelCase_: DatasetInfosDict) -> Union[str, Any]:
    """DatasetInfosDict round-trips through a directory (README.md)."""
    lowerCAmelCase__ = str(UpperCamelCase_)
    dataset_infos_dict.write_to_directory(UpperCamelCase_)
    lowerCAmelCase__ = DatasetInfosDict.from_directory(UpperCamelCase_)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        lowerCAmelCase__ = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        lowerCAmelCase__ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(UpperCamelCase_, "README.md"))
48
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model") if is_sentencepiece_available(): import sentencepiece as sp UpperCAmelCase__ : Tuple = 5 UpperCAmelCase__ : List[Any] = 10 @require_sentencepiece @require_tokenizers class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :Tuple = SpeechaTextTokenizer snake_case__ :Dict = False snake_case__ :Optional[int] = True def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" super().setUp() lowerCAmelCase__ = sp.SentencePieceProcessor() spm_model.Load(__magic_name__ ) lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )] lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) lowerCAmelCase__ = Path(self.tmpdirname ) save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = "<pad>" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): 
"""simple docstring""" lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(__magic_name__ ) , 1001 ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1001 ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = tokenizer.tokenize("This is a test" ) self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , ) lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 
797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , ) @require_sentencepiece class A ( unittest.TestCase ): snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium' snake_case__ :Tuple 
= 'C\'est trop cool' snake_case__ :List[str] = 'Esto es genial' @classmethod def __SCREAMING_SNAKE_CASE ( cls : List[Any] ): """simple docstring""" lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 10000 ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" self.assertIn(__magic_name__ , self.tokenizer.all_special_ids ) lowerCAmelCase__ = [ES_CODE, 4, 1601, 47, 7647, 2] lowerCAmelCase__ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertNotIn(self.tokenizer.eos_token , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = "fr" lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , __magic_name__ ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = "fr" self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) lowerCAmelCase__ = "es" self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
48
1
'''simple docstring'''
# NOTE(review): this chunk is machine-mangled source for an interactive terminal
# selection menu (in the style of `accelerate`'s command-line BulletMenu — TODO
# confirm against the original file).  The mangling is visible in the code
# itself: every local binding is rewritten to `lowerCAmelCase__`, parameters are
# all named `__magic_name__` (duplicated inside one signature, which is a
# SyntaxError in Python), and method bodies read names (`choices`, `prompt`,
# `index`, `direction`, `num_spaces`, `in_colab`, `choice`, `default_choice`)
# that are never bound.  The code below is reproduced verbatim, only re-lined
# and annotated; it cannot run as-is.
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP

# Flag recording whether the `google.colab` package is importable (i.e. we are
# presumably running inside Google Colab); defaults to False when the probe
# itself is unavailable.
UpperCAmelCase__ : Union[str, Any] = False
try:
    UpperCAmelCase__ : int = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class A :
    # Interactive single-choice menu.  Registered with the `input` key-handler
    # framework; the `@input.mark(...)` decorators below bind methods to keys.

    def __init__( self : str , __magic_name__ : str = None , __magic_name__ : list = [] ):
        """Store the prompt and the list of selectable choices.

        NOTE(review): duplicate parameter names and the repeated rebinding of
        `lowerCAmelCase__` are mangling artifacts — the original presumably
        assigned `self.position`, `self.choices`, `self.prompt` and
        `self.arrow_char` here (those attributes are read by the methods
        below); verify against the original source.
        """
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = choices
        lowerCAmelCase__ = prompt
        # Windows consoles may not render the arrow glyph, hence the fallback.
        if sys.platform == "win32":
            lowerCAmelCase__ = "*"
        else:
            lowerCAmelCase__ = "➔ "

    def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : int , __magic_name__ : str = "" ):
        """Write one choice; colorized (green, code 32) except on Windows."""
        if sys.platform != "win32":
            writeColor(self.choices[index] , 32 , __magic_name__ )
        else:
            forceWrite(self.choices[index] , __magic_name__ )

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : int ):
        """Print a choice line, prefixing the arrow marker on the current one."""
        if index == self.position:
            forceWrite(f""" {self.arrow_char} """ )
            self.write_choice(__magic_name__ )
        else:
            forceWrite(f""" {self.choices[index]}""" )
        reset_cursor()

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Direction , __magic_name__ : int = 1 ):
        """Move the highlighted position up/down, clamped at the list ends,
        and redraw the old and new lines."""
        lowerCAmelCase__ = self.position
        if direction == Direction.DOWN:
            # Already at the bottom: nothing to do.
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            # Already at the top: nothing to do.
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(__magic_name__ )
        move_cursor(__magic_name__ , direction.name )
        self.print_choice(self.position )

    @input.mark(KEYMAP["up"] )
    def __SCREAMING_SNAKE_CASE ( self : int ):
        """Key handler: arrow-up moves the selection up."""
        self.move_direction(Direction.UP )

    @input.mark(KEYMAP["down"] )
    def __SCREAMING_SNAKE_CASE ( self : str ):
        """Key handler: arrow-down moves the selection down."""
        self.move_direction(Direction.DOWN )

    @input.mark(KEYMAP["newline"] )
    def __SCREAMING_SNAKE_CASE ( self : List[str] ):
        """Key handler: Enter confirms — jump below the menu and return the index."""
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        return self.position

    @input.mark(KEYMAP["interrupt"] )
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Key handler: Ctrl-C — leave the menu cleanly, then re-raise."""
        move_cursor(len(self.choices ) - self.position , "DOWN" )
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(__magic_name__ )] for number in range(10 )] )
    def __SCREAMING_SNAKE_CASE ( self : Dict ):
        """Key handler: digits 0-9 jump directly to that choice index
        (out-of-range digits are ignored)."""
        lowerCAmelCase__ = int(chr(self.current_selection ) )
        lowerCAmelCase__ = index - self.position
        if index == self.position:
            return
        if index < len(self.choices ):
            if self.position > index:
                self.move_direction(Direction.UP , -movement )
            elif self.position < index:
                self.move_direction(Direction.DOWN , __magic_name__ )
            else:
                return
        else:
            return

    def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : int = 0 ):
        """Run the menu loop and return the chosen index.

        In Colab (no raw-key support) it falls back to reading an integer from
        stdin; otherwise it dispatches keys via `handle_input` with the cursor
        hidden, then erases the menu and echoes the final choice.
        """
        if self.prompt:
            linebreak()
            forceWrite(self.prompt , "\n" )
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
        lowerCAmelCase__ = default_choice
        for i in range(len(self.choices ) ):
            self.print_choice(__magic_name__ )
            forceWrite("\n" )
        # Move back up to the initially-highlighted row before reading keys.
        move_cursor(len(self.choices ) - self.position , "UP" )
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        lowerCAmelCase__ = int(builtins.input() )
                    except ValueError:
                        lowerCAmelCase__ = default_choice
                else:
                    lowerCAmelCase__ = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the menu (all choice lines plus the prompt line).
                    for _ in range(len(self.choices ) + 1 ):
                        move_cursor(1 , "UP" )
                        clear_line()
                    self.write_choice(__magic_name__ , "\n" )
                    return choice
48
'''simple docstring'''
# NOTE(review): this chunk is machine-mangled source for the TensorFlow RegNet
# model family (the class names referenced in the bodies — TFRegNetConvLayer,
# TFRegNetShortCut, TFRegNetSELayer, TFRegNetXLayer/YLayer, TFRegNetStage,
# TFRegNetEncoder, TFRegNetEmbeddings, TFRegNetMainLayer — reveal the intended
# identities; every class has been renamed to `A` and every local to
# `lowerCAmelCase__`, with duplicate `__magic_name__` parameters that are a
# SyntaxError in Python).  The code is reproduced verbatim below, only re-lined
# and annotated; it cannot run as-is — confirm against the original file.
from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig

UpperCAmelCase__ : Tuple = logging.get_logger(__name__)

# General docstring
UpperCAmelCase__ : int = "RegNetConfig"

# Base docstring
UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7]

# Image classification docstring
UpperCAmelCase__ : Tuple = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat"

UpperCAmelCase__ : int = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


# Conv -> BatchNorm -> activation block; explicit zero padding (kernel_size//2)
# in front of a "VALID" conv emulates "same" padding in NHWC format.
class A ( tf.keras.layers.Layer ):
    def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ):
        """simple docstring"""
        super().__init__(**__magic_name__ )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        lowerCAmelCase__ = tf.keras.layers.ConvaD(
            filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , )
        lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
        # No activation string -> identity (no-op activation).
        lowerCAmelCase__ = ACTaFN[activation] if activation is not None else tf.identity

    def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
        """Apply pad -> conv -> batch-norm -> activation."""
        lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) )
        lowerCAmelCase__ = self.normalization(__magic_name__ )
        lowerCAmelCase__ = self.activation(__magic_name__ )
        return hidden_state


# Stem/embedding layer: validates the channel count, converts NCHW -> NHWC
# (Conv2D on CPU does not support NCHW), then applies one conv block.
class A ( tf.keras.layers.Layer ):
    def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ):
        """simple docstring"""
        super().__init__(**__magic_name__ )
        lowerCAmelCase__ = config.num_channels
        lowerCAmelCase__ = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ):
        """simple docstring"""
        lowerCAmelCase__ = shape_list(__magic_name__ )[1]
        # Only checkable eagerly; in graph mode the static dim may be unknown.
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) )
        lowerCAmelCase__ = self.embedder(__magic_name__ )
        return hidden_state


# 1x1 conv + batch-norm projection used as the residual shortcut when the
# channel count or stride changes.
class A ( tf.keras.layers.Layer ):
    def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ):
        """simple docstring"""
        super().__init__(**__magic_name__ )
        lowerCAmelCase__ = tf.keras.layers.ConvaD(
            filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" )
        lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ):
        """simple docstring"""
        return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ )


# Squeeze-and-excitation layer: global-average pool, two 1x1 convs
# (relu then sigmoid gate), then rescale the input by the gate.
class A ( tf.keras.layers.Layer ):
    def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ):
        """simple docstring"""
        super().__init__(**__magic_name__ )
        lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
        lowerCAmelCase__ = [
            tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ),
            tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
        ]

    def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ):
        """simple docstring"""
        lowerCAmelCase__ = self.pooler(__magic_name__ )
        for layer_module in self.attention:
            lowerCAmelCase__ = layer_module(__magic_name__ )
        # Channel-wise gating of the input by the pooled attention weights.
        lowerCAmelCase__ = hidden_state * pooled
        return hidden_state


# RegNet "X" residual block: 1x1 reduce, 3x3 grouped conv, 1x1 expand,
# plus a (projected or identity) shortcut.
class A ( tf.keras.layers.Layer ):
    def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
        """simple docstring"""
        super().__init__(**__magic_name__ )
        # Shortcut must project whenever channels or spatial size change.
        lowerCAmelCase__ = in_channels != out_channels or stride != 1
        lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
        lowerCAmelCase__ = (
            TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        lowerCAmelCase__ = [
            TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                __magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
            TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ),
        ]
        lowerCAmelCase__ = ACTaFN[config.hidden_act]

    def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ):
        """simple docstring"""
        lowerCAmelCase__ = hidden_state
        for layer_module in self.layers:
            lowerCAmelCase__ = layer_module(__magic_name__ )
        lowerCAmelCase__ = self.shortcut(__magic_name__ )
        hidden_state += residual
        lowerCAmelCase__ = self.activation(__magic_name__ )
        return hidden_state


# RegNet "Y" residual block: same as the X block with a squeeze-and-excitation
# layer inserted (reduced to in_channels/4, rounded) before the final 1x1 conv.
class A ( tf.keras.layers.Layer ):
    def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
        """simple docstring"""
        super().__init__(**__magic_name__ )
        lowerCAmelCase__ = in_channels != out_channels or stride != 1
        lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
        lowerCAmelCase__ = (
            TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear" , name="shortcut" )
        )
        lowerCAmelCase__ = [
            TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
            TFRegNetConvLayer(
                __magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
            TFRegNetSELayer(__magic_name__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
            TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ),
        ]
        lowerCAmelCase__ = ACTaFN[config.hidden_act]

    def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ):
        """simple docstring"""
        lowerCAmelCase__ = hidden_state
        for layer_module in self.layers:
            lowerCAmelCase__ = layer_module(__magic_name__ )
        lowerCAmelCase__ = self.shortcut(__magic_name__ )
        hidden_state += residual
        lowerCAmelCase__ = self.activation(__magic_name__ )
        return hidden_state


# One RegNet stage: `depth` stacked X or Y blocks (per config.layer_type),
# with the stride applied only in the first block.
class A ( tf.keras.layers.Layer ):
    def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ):
        """simple docstring"""
        super().__init__(**__magic_name__ )
        lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        lowerCAmelCase__ = [
            # downsampling is done in the first layer with stride of 2
            layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ),
            *[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
        ]

    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ):
        """simple docstring"""
        for layer_module in self.layers:
            lowerCAmelCase__ = layer_module(__magic_name__ )
        return hidden_state


# Encoder: the sequence of stages; optionally collects per-stage hidden states.
class A ( tf.keras.layers.Layer ):
    def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ):
        """simple docstring"""
        super().__init__(**__magic_name__ )
        lowerCAmelCase__ = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                __magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
        # Pair consecutive hidden sizes to get each stage's (in, out) channels.
        lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) )

    def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ):
        """simple docstring"""
        lowerCAmelCase__ = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                lowerCAmelCase__ = hidden_states + (hidden_state,)
            lowerCAmelCase__ = stage_module(__magic_name__ )
        # Append the final stage's output as well.
        if output_hidden_states:
            lowerCAmelCase__ = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ )


# Keras-serializable main layer: embeddings -> encoder -> global pooling;
# converts outputs back from NHWC to NCHW for interface uniformity.
@keras_serializable
class A ( tf.keras.layers.Layer ):
    # Config class used for (de)serialization of this layer.
    snake_case__ :List[Any] = RegNetConfig

    def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
        """simple docstring"""
        super().__init__(**__magic_name__ )
        lowerCAmelCase__ = config
        lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" )
        lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" )
        lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )

    @unpack_inputs
    def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ):
        """simple docstring"""
        lowerCAmelCase__ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
        lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ )
        lowerCAmelCase__ = self.encoder(
            __magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
        lowerCAmelCase__ = encoder_outputs[0]
        lowerCAmelCase__ = self.pooler(__magic_name__ )
        # Change to NCHW output format have uniformity in the modules
        lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
        lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )


# Abstract pretrained-model base: ties the config class / model prefix /
# main-input name together and declares the serving input signature.
class A ( SCREAMING_SNAKE_CASE__ ):
    snake_case__ :str = RegNetConfig
    snake_case__ :Optional[Any] = 'regnet'
    snake_case__ :Tuple = 'pixel_values'

    @property
    def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        """Serving signature: a float NCHW batch of 224x224 images."""
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}


UpperCAmelCase__ : List[str] = R"\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"

UpperCAmelCase__ : Tuple = R"\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"


# Bare RegNet model: thin wrapper around the main layer, returning raw features.
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
    def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ):
        """simple docstring"""
        super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
        lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(__magic_name__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ):
        """simple docstring"""
        lowerCAmelCase__ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
        lowerCAmelCase__ = self.regnet(
            pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )


# RegNet with an image-classification head (Flatten + Dense over the pooled
# features; identity when num_labels == 0).
@add_start_docstrings(
    '\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ):
        """simple docstring"""
        super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
        lowerCAmelCase__ = config.num_labels
        lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
        # classification head
        lowerCAmelCase__ = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(__magic_name__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ):
        """simple docstring"""
        lowerCAmelCase__ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
        lowerCAmelCase__ = self.regnet(
            __magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
        lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
        lowerCAmelCase__ = self.classifier[0](__magic_name__ )
        lowerCAmelCase__ = self.classifier[1](__magic_name__ )
        # Loss is only computed when labels are provided.
        lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ )
        if not return_dict:
            lowerCAmelCase__ = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
48
1
'''simple docstring''' import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process UpperCAmelCase__ : str = logging.getLogger(__name__) UpperCAmelCase__ : Dict = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) UpperCAmelCase__ : Optional[int] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class A : snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(SCREAMING_SNAKE_CASE__ )} , ) snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. 
Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) snake_case__ :bool = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) snake_case__ :str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) snake_case__ :bool = field( default=SCREAMING_SNAKE_CASE__ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' 
) } , ) def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class A : snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) snake_case__ :Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The input training data file (a text file).'} ) snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , ) snake_case__ :Optional[str] = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , ) snake_case__ :bool = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) snake_case__ :Optional[int] = field( default=5 , metadata={ 'help': 'The percentage of the train set used as validation set in case there\'s no validation split' } , ) snake_case__ :Optional[int] = field( default=SCREAMING_SNAKE_CASE__ , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated. Default to the max input length of the model.' 
) } , ) snake_case__ :Optional[int] = field( default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , ) snake_case__ :float = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) snake_case__ :bool = field( default=SCREAMING_SNAKE_CASE__ , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" if self.train_file is not None: lowerCAmelCase__ = self.train_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: lowerCAmelCase__ = self.validation_file.split("." )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def A ( UpperCamelCase_ : Dict , UpperCamelCase_ : str ) -> Dict: '''simple docstring''' with open(UpperCamelCase_ , "r" , encoding="utf-8" ) as f: lowerCAmelCase__ = [json.loads(UpperCamelCase_ ) for line in f.read().splitlines() if (len(UpperCamelCase_ ) > 0 and not line.isspace())] assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) lowerCAmelCase__ = {c: dataset[c] for c in dataset.column_names} lowerCAmelCase__ = refs return Dataset.from_dict(UpperCamelCase_ ) def A ( ) -> Dict: '''simple docstring''' lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = parser.parse_args_into_dataclasses() # Detecting last checkpoint. lowerCAmelCase__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , UpperCamelCase_ ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowerCAmelCase__ = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): lowerCAmelCase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , ) lowerCAmelCase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , ) else: lowerCAmelCase__ = {} if data_args.train_file is not None: lowerCAmelCase__ = data_args.train_file if data_args.validation_file is not None: lowerCAmelCase__ = data_args.validation_file lowerCAmelCase__ = data_args.train_file.split("." )[-1] if extension == "txt": lowerCAmelCase__ = "text" lowerCAmelCase__ = load_dataset(UpperCamelCase_ , data_files=UpperCamelCase_ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowerCAmelCase__ = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.config_name , **UpperCamelCase_ ) elif model_args.model_name_or_path: lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase_ ) else: lowerCAmelCase__ = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." ) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) lowerCAmelCase__ = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: lowerCAmelCase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCamelCase_ ) elif model_args.model_name_or_path: lowerCAmelCase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCamelCase_ ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: lowerCAmelCase__ = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) lowerCAmelCase__ = AutoModelForMaskedLM.from_config(UpperCamelCase_ ) model.resize_token_embeddings(len(UpperCamelCase_ ) ) # Preprocessing the datasets. 
# First we tokenize all the texts. if training_args.do_train: lowerCAmelCase__ = datasets["train"].column_names else: lowerCAmelCase__ = datasets["validation"].column_names lowerCAmelCase__ = "text" if "text" in column_names else column_names[0] lowerCAmelCase__ = "max_length" if data_args.pad_to_max_length else False def tokenize_function(UpperCamelCase_ : int ): # Remove empty lines lowerCAmelCase__ = [line for line in examples["text"] if len(UpperCamelCase_ ) > 0 and not line.isspace()] return tokenizer(examples["text"] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=data_args.max_seq_length ) lowerCAmelCase__ = datasets.map( UpperCamelCase_ , batched=UpperCamelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: lowerCAmelCase__ = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: lowerCAmelCase__ = add_chinese_references( tokenized_datasets["validation"] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer lowerCAmelCase__ = data_args.train_ref_file or data_args.validation_ref_file if has_ref: lowerCAmelCase__ = False # Data collator # This one will take care of randomly masking the tokens. 
lowerCAmelCase__ = DataCollatorForWholeWordMask(tokenizer=UpperCamelCase_ , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer lowerCAmelCase__ = Trainer( model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=UpperCamelCase_ , data_collator=UpperCamelCase_ , ) # Training if training_args.do_train: if last_checkpoint is not None: lowerCAmelCase__ = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): lowerCAmelCase__ = model_args.model_name_or_path else: lowerCAmelCase__ = None lowerCAmelCase__ = trainer.train(resume_from_checkpoint=UpperCamelCase_ ) trainer.save_model() # Saves the tokenizer too for easy upload lowerCAmelCase__ = os.path.join(training_args.output_dir , "train_results.txt" ) if trainer.is_world_process_zero(): with open(UpperCamelCase_ , "w" ) as writer: logger.info("***** Train results *****" ) for key, value in sorted(train_result.metrics.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # Evaluation lowerCAmelCase__ = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) lowerCAmelCase__ = trainer.evaluate() lowerCAmelCase__ = math.exp(eval_output["eval_loss"] ) lowerCAmelCase__ = perplexity lowerCAmelCase__ = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" ) if trainer.is_world_process_zero(): with open(UpperCamelCase_ , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in sorted(results.items() ): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) return results def A ( UpperCamelCase_ : Optional[int] ) -> str: 
'''simple docstring''' main() if __name__ == "__main__": main()
48
'''simple docstring'''

from datetime import datetime

import matplotlib.pyplot as plt
import torch


def A(UpperCamelCase_) -> None:
    """Freeze every parameter of the given ``torch.nn.Module``.

    The obfuscated source assigned ``False`` to a throwaway local inside the
    loop, leaving all parameters trainable; the intent is to stop gradient
    tracking on each parameter.
    """
    for param in UpperCamelCase_.parameters():
        param.requires_grad = False


def A() -> str:
    """Return the best available torch device string: "cuda", "mps" or "cpu".

    MPS is selected when built and available, but a warning is printed because
    backpropagation on MPS was unreliable when this helper was written.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def A(UpperCamelCase_) -> None:
    """Display an image with matplotlib, hiding both axes.

    NOTE(review): the obfuscated source passed the image itself to
    ``set_visible``; hiding the axes (``False``) matches the evident intent.
    """
    fig = plt.imshow(UpperCamelCase_)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def A() -> str:
    """Return the current wall-clock time formatted as ``HH:MM:SS``."""
    current_time = datetime.now()
    return current_time.strftime("%H:%M:%S")
48
1
# Baconian (Bacon) cipher: every letter maps to a 5-character A/B code.
# NOTE: this table deliberately gives distinct codes to i/j and u/v (the
# classic cipher merges them), so decoding is unambiguous.
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
# Inverse table for decoding; valid because every code above is unique.
decode_dict = {value: key for key, value in encode_dict.items()}


def __lowercase(snake_case):
    """Encode *snake_case* (letters and spaces only) with the Bacon cipher.

    Raises ``Exception`` on any other character (kept as ``Exception`` for
    backward compatibility; ``ValueError`` would be more idiomatic).
    """
    encoded = ""
    for letter in snake_case.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def __lowercase(snake_case):
    """Decode a Bacon-cipher string containing only 'A', 'B' and spaces.

    Each space-separated word is consumed five characters at a time; decoded
    words are rejoined with single spaces.
    """
    if set(snake_case) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in snake_case.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
0
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Submodule name -> list of public symbols it exports; consumed by _LazyModule
# below so heavy backends are only imported on first attribute access.
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

# Each backend's model class is only advertised when that backend is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports, mirroring the structure above.
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy; the obfuscated source bound the
    # proxy to a dead local and referenced an undefined ``_import_structure``.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
48
0
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class __lowerCamelCase (unittest.TestCase ): @slow def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' ) __UpperCamelCase = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]],dtype=tf.intaa,) # J'aime le camembert !" __UpperCamelCase = model(A_ )['last_hidden_state'] __UpperCamelCase = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape,A_ ) # compare the actual values for a slice. __UpperCamelCase = tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]],dtype=tf.floataa,) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy(),expected_slice.numpy(),atol=1E-4 ) )
1
'''simple docstring'''

import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging

logging.set_verbosity_info()


def A(UpperCamelCase_, big_bird_config_file, pytorch_dump_path, is_trivia_qa) -> None:
    """Convert a BigBird TensorFlow checkpoint to a PyTorch model directory.

    Args:
        UpperCamelCase_: path to the TensorFlow checkpoint.
        big_bird_config_file: JSON config describing the architecture.
        pytorch_dump_path: output directory for the converted model.
        is_trivia_qa: when True, build the TriviaQA question-answering head.

    (The obfuscated source gave all four parameters the same name — a
    SyntaxError — and referenced locals that were never bound.)
    """
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, UpperCamelCase_, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    # The obfuscated source called an undefined ``convert_tf_checkpoint_to_pytorch``.
    A(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
48
0
UpperCAmelCase_ = 0 # The first color of the flag. UpperCAmelCase_ = 1 # The second color of the flag. UpperCAmelCase_ = 2 # The third color of the flag. UpperCAmelCase_ = (red, white, blue) def SCREAMING_SNAKE_CASE_ ( _snake_case :list ) -> list: if not sequence: return [] if len(_snake_case ) == 1: return list(_snake_case ) _A = 0 _A = len(_snake_case ) - 1 _A = 0 while mid <= high: if sequence[mid] == colors[0]: _A , _A = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: _A , _A = sequence[high], sequence[mid] high -= 1 else: _A = F'''The elements inside the sequence must contains only {colors} values''' raise ValueError(_snake_case ) return sequence if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase_ = input("""Enter numbers separated by commas:\n""").strip() UpperCAmelCase_ = [int(item.strip()) for item in user_input.split(""",""")] print(f'{dutch_national_flag_sort(unsorted)}')
2
'''simple docstring''' from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class A : def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ): """simple docstring""" lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = seq_length lowerCAmelCase__ = is_training lowerCAmelCase__ = use_input_mask lowerCAmelCase__ = use_token_type_ids lowerCAmelCase__ = use_labels lowerCAmelCase__ = vocab_size lowerCAmelCase__ = block_sizes lowerCAmelCase__ = num_decoder_layers lowerCAmelCase__ = d_model lowerCAmelCase__ = n_head lowerCAmelCase__ = d_head lowerCAmelCase__ = d_inner lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout lowerCAmelCase__ = 
attention_dropout lowerCAmelCase__ = activation_dropout lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = type_vocab_size lowerCAmelCase__ = 2 lowerCAmelCase__ = num_labels lowerCAmelCase__ = num_choices lowerCAmelCase__ = scope lowerCAmelCase__ = initializer_std # Used in the tests to check the size of the first attention layer lowerCAmelCase__ = n_head # Used in the tests to check the size of the first hidden state lowerCAmelCase__ = self.d_model # Used in the tests to check the number of output hidden states/attentions lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: lowerCAmelCase__ = self.num_hidden_layers + 2 def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ = None if self.use_input_mask: lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ = None if self.use_token_type_ids: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , 
activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ): """simple docstring""" lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = [input_ids, input_mask] lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ): """simple docstring""" lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = [input_ids, input_mask] 
lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ): 
"""simple docstring""" lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = self.num_choices lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ): """simple docstring""" lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , 
__magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) , ) = config_and_inputs lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :int = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) snake_case__ :Any = ( { 'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) snake_case__ :str = False snake_case__ :Any = False def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = TFFunnelModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" 
self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @require_tf class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :Any = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) snake_case__ :int = False snake_case__ :List[Any] = False def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ ) lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
48
0
"""Tests for the DDPM scheduler (diffusers scheduler common test suite)."""
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class SCREAMING_SNAKE_CASE__(SchedulerCommonTest):
    """Common-suite tests for DDPMScheduler.

    NOTE(review): the base class was the undefined name ``snake_case_``
    (SchedulerCommonTest is the import this file provides) and the scheduler
    tuple was bound to ``lowerCAmelCase_`` even though every test body reads
    ``self.scheduler_classes``; both restored. Test methods were all given
    one identical name (so they silently overrode each other) and are renamed
    to unique ``test_*`` names so unittest can discover them.
    """

    # Read by SchedulerCommonTest to know which scheduler class to exercise.
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default DDPM config dict, overridable via keyword args.

        Fixed NameError: the dict was assigned to one placeholder name while
        the following lines updated and returned ``config``.
        """
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Reference values for the "fixed_small" variance schedule.
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            self.assertEqual(prev_t.item(), expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
3
"""UMT5 model configuration."""
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


# NOTE(review): both module constants were bound to the same placeholder name,
# so the logger was lost; distinct names restored.
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class A(PretrainedConfig):
    """Configuration class for UMT5 models.

    NOTE(review): both classes in this module are named ``A`` (mangling
    artifact), so this one is shadowed by the ONNX config below; the names
    are kept to avoid breaking external references. The base class was the
    undefined name ``SCREAMING_SNAKE_CASE__``; PretrainedConfig (imported
    above) is the intended base.
    """

    # Contract with PretrainedConfig machinery: these names are read by the base.
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        # Fixed: the mangled source bound every argument to one reused local
        # instead of instance attributes, so the properties below would fail.
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # default = symmetry with the encoder
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class A(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for UMT5 (seq2seq, optional past key values)."""

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # With cached past, the encoder attention mask covers past + current tokens.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
48
0
"""simple docstring""" def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : int ): lowerCAmelCase = word.split() def justify(_UpperCAmelCase : list , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str: lowerCAmelCase = max_width - width lowerCAmelCase = len(_UpperCAmelCase ) if len(_UpperCAmelCase ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: lowerCAmelCase = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] lowerCAmelCase = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] lowerCAmelCase = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(_UpperCAmelCase ): num_spaces_between_words_list[i] += 1 lowerCAmelCase = [] for i in range(_UpperCAmelCase ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * ' ' ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(_UpperCAmelCase ) lowerCAmelCase = [] lowerCAmelCase = [] lowerCAmelCase = 0 for word in words: if width + len(_UpperCAmelCase ) + len(_UpperCAmelCase ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(_UpperCAmelCase ) width += len(_UpperCAmelCase ) else: # justify the line and add it to result answer.append(justify(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) ) # reset new line and new width lowerCAmelCase ,lowerCAmelCase = [word], len(_UpperCAmelCase ) lowerCAmelCase = max_width - width - 
len(_UpperCAmelCase ) answer.append(' '.join(_UpperCAmelCase ) + (remaining_spaces + 1) * ' ' ) return answer if __name__ == "__main__": from doctest import testmod testmod()
4
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class A : def __init__( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = {} def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = {} def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ): """simple docstring""" if nodea not in self.connections: self.add_node(__magic_name__ ) if nodea not in self.connections: self.add_node(__magic_name__ ) lowerCAmelCase__ = probability def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" return list(self.connections ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]: '''simple docstring''' lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = Counter(graph.get_nodes() ) lowerCAmelCase__ = start for _ in range(UpperCamelCase_ ): lowerCAmelCase__ = graph.transition(UpperCamelCase_ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
48
0
"""Tests for utils/get_test_info.py (test/tester mapping extraction)."""
import os
import sys
import unittest


# NOTE(review): this constant was bound to `_lowercase` while the next line
# read `git_repo_path`; name restored so the sys.path append works.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

# Test files whose structure is inspected by the mapping helpers.
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class UpperCAmelCase_(unittest.TestCase):
    """NOTE(review): all three methods shared one mangled name (so only the
    last survived class creation); renamed to unique ``test_*`` names so
    unittest discovers each of them."""

    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
5
"""Doctest-based integration tests for the local `metrics/` implementations."""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch

import numpy as np
import pytest
from absl.testing import parameterized

import datasets
from datasets import load_metric

from .utils import for_all_test_methods, local, slow


# mark all tests as integration
pytestmark = pytest.mark.integration

# Optional third-party requirements per metric. NOTE(review): the mangled
# source bound all of these to one placeholder name while the skip-wrappers
# below reference them by these names; restored.
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    """Skip the wrapped metric test when fairseq is not installed."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    """Skip the wrapped metric test when transformers is not installed."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    """Skip the wrapped metric test on Windows for unsupported metrics."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    """One parameterized test case per local metric directory."""
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    """Run each local metric's doctests with heavy model calls patched out.

    NOTE(review): the mangled source named this class ``A``, but the patcher
    decorators below reference ``LocalMetricTest``; name restored.
    """

    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            # NOTE(review): the mangled assignment lost its target; the mock
            # must redirect calls to the local metric loader.
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


# Metrics that call a heavy model get their forward pass patched out below.
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf  # fixed: was the nonexistent "tensorflow.compat.va"
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    # NOTE(review): renamed from the mangled ``A`` so pytest discovers it.
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
48
0
import requests from bsa import BeautifulSoup def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str = "https://www.worldometers.info/coronavirus" ): SCREAMING_SNAKE_CASE__ = BeautifulSoup(requests.get(UpperCamelCase__ ).text , """html.parser""" ) SCREAMING_SNAKE_CASE__ = soup.findAll("""h1""" ) SCREAMING_SNAKE_CASE__ = soup.findAll("""div""" , {"""class""": """maincounter-number"""} ) keys += soup.findAll("""span""" , {"""class""": """panel-title"""} ) values += soup.findAll("""div""" , {"""class""": """number-table-main"""} ) return {key.text.strip(): value.text.strip() for key, value in zip(UpperCamelCase__ , UpperCamelCase__ )} if __name__ == "__main__": print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n') for key, value in world_covidaa_stats().items(): print(F'''{key}\n{value}\n''')
6
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Translation tool backed by NLLB-200 (plain-English names -> FLORES codes)."""
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


# Plain-English language name -> NLLB/FLORES-200 language code.
# NOTE(review): the class attribute below references LANGUAGE_CODES, but the
# mangled source bound this dict to a placeholder name; restored.
LANGUAGE_CODES = {
    "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng",
    "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn",
    "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn",
    "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn",
    "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn",
    "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo",
    "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", "Ganda": "lug_Latn", "Luo": "luo_Latn",
    "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva",
    "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn",
    "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn",
    "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn",
    "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn",
    "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn",
}


class A(PipelineTool):
    """Tool that translates text between NLLB-200 supported languages.

    NOTE(review): the mangled source gave the attributes and methods
    placeholder names; attribute names are restored to the PipelineTool
    contract (``default_checkpoint``, ``description``, ``name``, ...) and the
    three methods to ``encode``/``forward``/``decode``, which the methods'
    own bodies (``self.lang_to_code``, ``self.pre_processor``, ``self.model``,
    ``self.post_processor``) require.
    """

    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        """Tokenize `text`, mapping plain-English language names to NLLB codes."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"""{src_lang} is not a supported language.""")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"""{tgt_lang} is not a supported language.""")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
48
0
"""Fetch artifacts/reports from the latest completed daily CI workflow run."""
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Return the `num_runs` most recent scheduled daily CI workflow runs.

    NOTE(review): the mangled source reused one name for the headers, the
    workflow id and the url; reconstructed so the auth header is actually
    sent with the request. Function names restored to match the call sites
    in the functions below (the mangled defs all shared one name).
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) is the helper's actual keyword parameter name.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the artifacts and return {artifact: {member filename: text}}."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
7
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : int = logging.get_logger(__name__) class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Any = 'timm_backbone' def __init__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=True , __magic_name__ : str=True , __magic_name__ : List[Any]=None , **__magic_name__ : Tuple , ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = backbone lowerCAmelCase__ = num_channels lowerCAmelCase__ = features_only lowerCAmelCase__ = use_pretrained_backbone lowerCAmelCase__ = True lowerCAmelCase__ = out_indices if out_indices is not None else (-1,)
48
0
"""Tests for the BioGPT tokenizer."""
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the tmp dir provided by the mixin."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Return a (raw input, expected decoded output) pair for the common tests."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and id conversion against the hand-built vocab."""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """BioGPT prepends </s> (id 2) to each sequence; pairs are </s> A </s> B."""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    """Agent tool producing an English caption for an input image via BLIP."""

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # PIL is needed to open/convert images before preprocessing.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the PIL image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Run autoregressive caption generation."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the generated token ids into a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
48
0
import math def A ( __UpperCamelCase ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def A ( __UpperCamelCase = 10_001 ) -> int: try: A__ = int(__UpperCamelCase ) except (TypeError, ValueError): raise TypeError('Parameter nth must be int or castable to int.' ) from None if nth <= 0: raise ValueError('Parameter nth must be greater than or equal to one.' ) A__ = [] A__ = 2 while len(__UpperCamelCase ) < nth: if is_prime(__UpperCamelCase ): primes.append(__UpperCamelCase ) num += 1 else: num += 1 return primes[len(__UpperCamelCase ) - 1] if __name__ == "__main__": print(f'{solution() = }')
9
"""Tokenization class for mBART-50, backed by SentencePiece."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    """mBART-50 tokenizer wrapping a SentencePiece model with fairseq-aligned ids.

    The source text is prefixed with the source language code and suffixed with
    ``</s>`` (the target side analogously via ``set_tgt_lang_special_tokens``).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # SentencePieceProcessor objects are not picklable; drop and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # vocab file is gone from disk: serialize the in-memory model instead
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: [src_lang_code] X [eos] (pairs simply concatenated)."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset special tokens to [src_lang_code] (prefix) and [eos] (suffix)."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset special tokens to [tgt_lang_code] (prefix) and [eos] (suffix)."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
48
0
from __future__ import annotations _lowerCAmelCase = 8.988E9 # units = N * m^s * C^-2 def _snake_case ( __snake_case , __snake_case , __snake_case , __snake_case ): _UpperCamelCase = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: _UpperCamelCase = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: _UpperCamelCase = abs(__snake_case ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: _UpperCamelCase = abs(__snake_case ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: _UpperCamelCase = (COULOMBS_CONSTANT * charge_product / abs(__snake_case )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
10
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase__ = 0 if start < end: lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = a[end] lowerCAmelCase__ = a[pivot] lowerCAmelCase__ = temp lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 ) count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ ) return count def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict: '''simple docstring''' lowerCAmelCase__ = 0 lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = a[end] lowerCAmelCase__ = a[pivot] lowerCAmelCase__ = temp lowerCAmelCase__ = start - 1 for index in range(UpperCamelCase_ , UpperCamelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ = new_pivot_index + 1 lowerCAmelCase__ = a[new_pivot_index] lowerCAmelCase__ = a[index] lowerCAmelCase__ = temp lowerCAmelCase__ = a[new_pivot_index + 1] lowerCAmelCase__ = a[end] lowerCAmelCase__ = temp return new_pivot_index + 1, count UpperCAmelCase__ : Tuple = TemporaryFile() UpperCAmelCase__ : List[str] = 1_00 # 1000 elements are to be sorted UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p) np.save(outfile, X) print("The array is") print(X) outfile.seek(0) # using the same array UpperCAmelCase__ : Optional[Any] = np.load(outfile) UpperCAmelCase__ : Any = len(M) - 1 UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution" "is :" ) print(z)
48
0
"""UMT5 model configuration."""
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    """Configuration class for UMT5 models.

    Defaults reproduce google/umt5-small; `feed_forward_proj` is either an
    activation name or ``gated-{ACT_FN}`` for gated feed-forward blocks.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250_112,
        d_model=512,
        d_kv=64,
        d_ff=1_024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        # Alias used by generic model code.
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
11
"""Convert GroupViT checkpoints from the original repository to the HF format."""
import argparse

import requests
import torch
from PIL import Image

from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel


def rename_key(name):
    """Map an original GroupViT parameter name to its HF equivalent."""
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys, splitting fused qkv / in_proj attention weights."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias"
                ] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers
            # require special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict


def prepare_img():
    """Fetch the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the original checkpoint's weights into our GroupViT structure."""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
    )

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3_523, 6.3_629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1_873, 8.6_230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gccy-fcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
48
0
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive ``size``-length tuples from ``seq`` until it is exhausted."""
    iterator = iter(seq)
    while True:
        chunk = tuple(itertools.islice(iterator, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Prepare the plaintext: keep ASCII letters, upper-case them, and pad
    doubled letters (and an odd-length result) with ``X`` so every digraph
    consists of two distinct characters."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        # Playfair cannot encode a doubled letter inside one digraph.
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair key square as a flat 25-element list (I/J merged)."""
    # I and J share a cell, hence a 25-letter alphabet.
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # We're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler.
    table = []

    # Copy key chars into the table if they are in `alphabet`, ignoring duplicates.
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # Fill the rest of the table with the remaining alphabet chars.
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    """Encode ``plaintext`` with the Playfair cipher using ``key``."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # Same row: take the letter to the right (wrapping).
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # Same column: take the letter below (wrapping).
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap the columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decode Playfair ``ciphertext`` that was produced with the same ``key``."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # Same row: take the letter to the left (wrapping).
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # Same column: take the letter above (wrapping).
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap the columns back
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
12
'''simple docstring''' from __future__ import annotations from functools import lru_cache from math import ceil UpperCAmelCase__ : Optional[Any] = 1_00 UpperCAmelCase__ : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) UpperCAmelCase__ : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=1_00 ) def A ( UpperCamelCase_ : int ) -> set[int]: '''simple docstring''' if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} lowerCAmelCase__ = set() lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A ( UpperCamelCase_ : int = 50_00 ) -> int | None: '''simple docstring''' for number_to_partition in range(1 , UpperCamelCase_ ): if len(partition(UpperCamelCase_ ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F"{solution() = }")
48
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available A__ : List[Any] = { """configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : int = [ """GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """GraphormerForGraphClassification""", """GraphormerModel""", """GraphormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys A__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
13
"""Character-level tokenizer for MGP-STR scene-text recognition."""
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Tokenizer that maps every single character of the input to a vocabulary id.

    Args:
        vocab_file: path to a ``vocab.json`` mapping characters to ids.
        unk_token / bos_token / eos_token / pad_token: special tokens; the MGP-STR
            checkpoints use ``[GO]`` for unk/bos/pad and ``[s]`` for eos.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping (id -> character) used for decoding.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # One token per character.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary to ``<save_directory>/[<prefix>-]vocab.json``."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            # NOTE(review): sort_keys/ensure_ascii values were mangled in the
            # original; True/False restored from the upstream tokenizer.
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
48
0
"""Image processor for BLIP models: resize -> rescale -> normalize pipeline."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class BlipImageProcessor(BaseImageProcessor):
    """Preprocess images into the 384x384 CLIP-normalized format BLIP expects.

    Every ``do_*`` flag set at construction can be overridden per call to
    :meth:`preprocess`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # CLIP statistics are the documented defaults for BLIP checkpoints.
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (a dict with "height" and "width")."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255 to map into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize ``image`` with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured pipeline to one image or a list of images and
        return a :class:`BatchFeature` with "pixel_values"."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # NOTE(review): original condition was `do_resize and size is None or
        # resample is None`, which (by precedence) raised whenever resample was
        # None even with do_resize=False; parenthesized to match the message.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
14
'''simple docstring''' from math import sqrt def A ( UpperCamelCase_ : int ) -> int: '''simple docstring''' lowerCAmelCase__ = 0 for i in range(1 , int(sqrt(UpperCamelCase_ ) + 1 ) ): if n % i == 0 and i != sqrt(UpperCamelCase_ ): total += i + n // i elif i == sqrt(UpperCamelCase_ ): total += i return total - n def A ( UpperCamelCase_ : int = 1_00_00 ) -> int: '''simple docstring''' lowerCAmelCase__ = sum( i for i in range(1 , UpperCamelCase_ ) if sum_of_divisors(sum_of_divisors(UpperCamelCase_ ) ) == i and sum_of_divisors(UpperCamelCase_ ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
48
0
"""Byte-level BPE tokenizer for BART (GPT-2 style)."""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


@lru_cache()
def bytes_to_unicode():
    """Return a mapping from every byte value to a printable unicode character.

    Printable bytes map to themselves; the remaining bytes are shifted into the
    256+ range so the BPE vocabulary never has to contain whitespace/control
    characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BartTokenizer(PreTrainedTokenizer):
    """GPT-2 style byte-level BPE tokenizer with BART's special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair-encoding merges to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (earliest learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Reverse the byte encoding: join tokens and decode back to text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """``<s> A </s>`` or ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BART does not use token type ids; return a zero list of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        # A leading space guarantees the first word is tokenized like any other.
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
15
'''simple docstring''' import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray: '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' lowerCAmelCase__ = np.nan for i in range(UpperCamelCase_ ): lowerCAmelCase__ = features[:, labels == i] lowerCAmelCase__ = data.mean(1 ) # Centralize the data of class i lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(UpperCamelCase_ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) return covariance_sum / features.shape[1] def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' lowerCAmelCase__ = features.mean(1 ) lowerCAmelCase__ = np.nan for i in range(UpperCamelCase_ ): lowerCAmelCase__ = features[:, labels == i] lowerCAmelCase__ = data.shape[1] lowerCAmelCase__ = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) lowerCAmelCase__ = device_data * np.dot( column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , ) return covariance_sum / features.shape[1] def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' if features.any(): lowerCAmelCase__ = features.mean(1 ) # Center the dataset lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) ) lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1] lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ ) # Take all the columns in the reverse order (-1), and then takes only the first lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ ) logging.info("Principal Component Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ ) logging.error("Dataset empty" ) raise AssertionError def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: lowerCAmelCase__ ,lowerCAmelCase__ = eigh( covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , ) lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions] lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ ) lowerCAmelCase__ = svd_matrix[:, 0:dimensions] lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ ) logging.info("Linear Discriminant Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , 
force=UpperCamelCase_ ) logging.error("Dataset empty" ) raise AssertionError def A ( ) -> None: '''simple docstring''' lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] ) lowerCAmelCase__ = 2 lowerCAmelCase__ = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(UpperCamelCase_ ) as error_info: lowerCAmelCase__ = linear_discriminant_analysis( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if isinstance(UpperCamelCase_ , np.ndarray ): raise AssertionError( "Did not raise AssertionError for dimensions > classes" ) assert error_info.type is AssertionError def A ( ) -> None: '''simple docstring''' lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowerCAmelCase__ = 2 lowerCAmelCase__ = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] ) with pytest.raises(UpperCamelCase_ ) as error_info: lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ ) if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
48
0
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def __a ( A__ : str , A__ : Optional[int] , A__ : int ): # Initialise PyTorch model SCREAMING_SNAKE_CASE = AlbertConfig.from_json_file(A__ ) print(F"Building PyTorch model from configuration: {config}" ) SCREAMING_SNAKE_CASE = AlbertForPreTraining(A__ ) # Load weights from tf checkpoint load_tf_weights_in_albert(A__ , A__ , A__ ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , A__ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--albert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained ALBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __A : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
16
def text_justification(word: str, max_width: int) -> list:
    """Fully justify the words of sentence ``word`` into lines of exactly
    ``max_width`` characters.

    Lines are filled greedily; extra spaces are distributed round-robin to the
    leftmost gaps; the last line is left-justified and space-padded.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # If there is only one word in line, just pad the remainder with spaces.
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : number of spaces to insert
            # after word line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
            # Distribute the leftover spaces round-robin to the leftmost gaps.
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # Add the word, then the spaces to insert after it.
                aligned_words_list.append(line[i])
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # Just add the last word to the sentence.
            aligned_words_list.append(line[-1])
            # Join the aligned words list to form a justified line.
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # Keep adding words until we can fill out max_width:
            # width = sum of lengths of all words in `line`,
            # len(line) = minimum number of separating spaces needed.
            line.append(inner_word)
            width += len(inner_word)
        else:
            # Justify the full line and add it to the result.
            answer.append(justify(line, width, max_width))
            # Reset to a new line containing only the current word.
            line, width = [inner_word], len(inner_word)
    # Last line: left-justified, padded with spaces up to max_width.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
48
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import structure: maps submodule name -> list of public names exported.
# Fix: this dict MUST be bound to the name `_import_structure`, because it is
# passed to `_LazyModule` below; the obfuscated original bound it to a
# throwaway placeholder name, which raised NameError at import time.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only exported when torch is installed.
    # Fix: the obfuscated original assigned this list to another throwaway
    # name instead of registering it in the import structure.
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # Replace this module object with a lazy proxy, per the transformers
    # convention — a plain variable assignment (as in the obfuscated
    # original) would not make the submodules lazily importable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
17
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path

# Fix: `Any`, `Tuple` and `Union` are used in annotations below but were not
# imported in the obfuscated original (NameError at class-creation time).
from typing import Any, List, Literal, Optional, Tuple, Union

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool

# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__: str = sys.version_info >= (3, 10)
# Alias restoring the name that later code in this file actually reads.
is_python_no_less_than_3_10 = UpperCAmelCase__


def A(default=None, metadata=None):
    """Return a dataclasses `field` whose default is a fresh copy of *default*.

    Fix: the obfuscated original declared the same parameter name twice
    (a SyntaxError) while the body read ``default`` and ``metadata``; the two
    distinct keyword parameters are restored here.  ``default_factory`` gives
    every dataclass instance its own copy, avoiding the shared-mutable-default
    pitfall.
    """
    return field(default_factory=lambda: default, metadata=metadata)


# Alias restoring the helper name used by the field declarations below.
list_field = A


@dataclass
class A:
    # NOTE(review): the obfuscation collapsed every field name to
    # `snake_case__`; in a dataclass only the last annotation survives.
    # The original field names are lost — confirm against the real file.
    snake_case__: int
    snake_case__: float
    snake_case__: str
    snake_case__: bool


@dataclass
class A:
    snake_case__: int = 42
    snake_case__: str = field(default='toto', metadata={'help': 'help message'})


@dataclass
class A:
    snake_case__: bool = False
    snake_case__: bool = True
    snake_case__: Optional[bool] = None


class A(SCREAMING_SNAKE_CASE__):
    # NOTE(review): `SCREAMING_SNAKE_CASE__` (presumably `Enum`) is an
    # obfuscation placeholder not defined in this chunk — confirm upstream.
    snake_case__: Any = 'titi'
    snake_case__: Optional[int] = 'toto'


class A(SCREAMING_SNAKE_CASE__):
    snake_case__: Union[str, Any] = 'titi'
    snake_case__: str = 'toto'
    snake_case__: int = 42


@dataclass
class A:
    snake_case__: BasicEnum = "toto"

    def __SCREAMING_SNAKE_CASE(self: Optional[Any]):
        """simple docstring"""
        # NOTE(review): presumably a `__post_init__` coercing the raw string
        # into the enum; `BasicEnum` is an obfuscation-lost name — confirm.
        lowerCAmelCase__ = BasicEnum(self.foo)


@dataclass
class A:
    snake_case__: MixedTypeEnum = "toto"

    def __SCREAMING_SNAKE_CASE(self: Tuple):
        """simple docstring"""
        # NOTE(review): same coercion pattern; `MixedTypeEnum` is lost.
        lowerCAmelCase__ = MixedTypeEnum(self.foo)


@dataclass
class A:
    snake_case__: Optional[int] = None
    snake_case__: Optional[float] = field(default=SCREAMING_SNAKE_CASE__, metadata={'help': 'help message'})
    snake_case__: Optional[str] = None
    snake_case__: Optional[List[str]] = list_field(default=[])
    snake_case__: Optional[List[int]] = list_field(default=[])
@dataclass class A : snake_case__ :List[int] = list_field(default=[] ) snake_case__ :List[int] = list_field(default=[1, 2, 3] ) snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class A : snake_case__ :List[int] = field() snake_case__ :str = field() snake_case__ :BasicEnum = field() def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = BasicEnum(self.required_enum ) @dataclass class A : snake_case__ :int snake_case__ :"BasicEnum" = field() snake_case__ :"Optional[bool]" = None snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} ) snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class A : snake_case__ :bool = False snake_case__ :bool = True snake_case__ :bool | None = None @dataclass class A : snake_case__ :int | None = None snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} ) snake_case__ :str | None = None snake_case__ :list[str] | None = list_field(default=[] ) snake_case__ :list[int] | None = list_field(default=[] ) class A ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"} lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) ) 
del xx["type"], yy["type"] self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] ((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ ) self.assertFalse(example.flag ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , default=42 , type=__magic_name__ ) expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" ) expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" ) expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ ) lowerCAmelCase__ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__magic_name__ ) for dataclass_type in dataclass_types: lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo 
, MixedTypeEnum.toto ) lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowerCAmelCase__ = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" @dataclass class A : snake_case__ :Literal["titi", "toto", 42] = "toto" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) lowerCAmelCase__ = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ ) expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ ) expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual( __magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , ) lowerCAmelCase__ = parser.parse_args("--foo_int 1 
--bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() ) self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ ) expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" ) expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ ) expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ ) expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ ) lowerCAmelCase__ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__magic_name__ ) for dataclass_type in dataclass_types: lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) ) lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() ) self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" 
lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , ) expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ ) expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" ) os.mkdir(__magic_name__ ) with open(temp_local_path + ".json" , "w+" ) as f: json.dump(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self 
: str ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" ) os.mkdir(__magic_name__ ) with open(temp_local_path + ".yaml" , "w+" ) as f: yaml.dump(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.assertIsNotNone(__magic_name__ )
48
0
'''simple docstring''' import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() _lowerCAmelCase = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) _lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return job_links except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict=None ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100''' _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() _lowerCAmelCase = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]} ) _lowerCAmelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(SCREAMING_SNAKE_CASE_ ): _lowerCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=SCREAMING_SNAKE_CASE_ ).json() artifacts.update({artifact["name"]: 
artifact["archive_download_url"] for artifact in result["artifacts"]} ) return artifacts except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ): '''simple docstring''' _lowerCAmelCase = None if token is not None: _lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = result.headers["Location"] _lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{artifact_name}.zip''' ) with open(SCREAMING_SNAKE_CASE_ , "wb" ) as fp: fp.write(response.content ) def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None ): '''simple docstring''' _lowerCAmelCase = [] _lowerCAmelCase = [] _lowerCAmelCase = None with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(SCREAMING_SNAKE_CASE_ ) as f: for line in f: _lowerCAmelCase = line.decode("UTF-8" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs _lowerCAmelCase = line[: line.index(": " )] _lowerCAmelCase = line[line.index(": " ) + len(": " ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED " ): # `test` is the test method that failed _lowerCAmelCase = line[len("FAILED " ) :] failed_tests.append(SCREAMING_SNAKE_CASE_ ) elif filename == "job_name.txt": _lowerCAmelCase = line if len(SCREAMING_SNAKE_CASE_ ) != 
len(SCREAMING_SNAKE_CASE_ ): raise ValueError( F'''`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE_ )} for `errors` ''' F'''and {len(SCREAMING_SNAKE_CASE_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some''' " problem." ) _lowerCAmelCase = None if job_name and job_links: _lowerCAmelCase = job_links.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # A list with elements of the form (line of error, error, failed test) _lowerCAmelCase = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] return result def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = [] _lowerCAmelCase = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if p.endswith(".zip" )] for p in paths: errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE_ , job_links=SCREAMING_SNAKE_CASE_ ) ) return errors def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=None ): '''simple docstring''' _lowerCAmelCase = Counter() counter.update([x[1] for x in logs] ) _lowerCAmelCase = counter.most_common() _lowerCAmelCase = {} for error, count in counts: if error_filter is None or error not in error_filter: _lowerCAmelCase = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} _lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) ) return r def __a(SCREAMING_SNAKE_CASE_ : List[str] ): '''simple docstring''' _lowerCAmelCase = test.split("::" )[0] if test.startswith("tests/models/" ): _lowerCAmelCase = test.split("/" )[2] else: _lowerCAmelCase = None return test def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=None ): '''simple docstring''' _lowerCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs] _lowerCAmelCase = 
[x for x in logs if x[2] is not None] _lowerCAmelCase = {x[2] for x in logs} _lowerCAmelCase = {} for test in tests: _lowerCAmelCase = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) _lowerCAmelCase = counter.most_common() _lowerCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} _lowerCAmelCase = sum(error_counts.values() ) if n_errors > 0: _lowerCAmelCase = {"count": n_errors, "errors": error_counts} _lowerCAmelCase = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE_ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) ) return r def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = "| no. | error | status |" _lowerCAmelCase = "|-:|:-|:-|" _lowerCAmelCase = [header, sep] for error in reduced_by_error: _lowerCAmelCase = reduced_by_error[error]["count"] _lowerCAmelCase = F'''| {count} | {error[:100]} | |''' lines.append(SCREAMING_SNAKE_CASE_ ) return "\n".join(SCREAMING_SNAKE_CASE_ ) def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase = "| model | no. 
of errors | major error | count |" _lowerCAmelCase = "|-:|-:|-:|-:|" _lowerCAmelCase = [header, sep] for model in reduced_by_model: _lowerCAmelCase = reduced_by_model[model]["count"] _lowerCAmelCase , _lowerCAmelCase = list(reduced_by_model[model]["errors"].items() )[0] _lowerCAmelCase = F'''| {model} | {count} | {error[:60]} | {_count} |''' lines.append(SCREAMING_SNAKE_CASE_ ) return "\n".join(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") _SCREAMING_SNAKE_CASE = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) _SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token) _SCREAMING_SNAKE_CASE = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. 
if " / " in k: _SCREAMING_SNAKE_CASE = k.find(" / ") _SCREAMING_SNAKE_CASE = k[index + len(" / ") :] _SCREAMING_SNAKE_CASE = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) _SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) _SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error _SCREAMING_SNAKE_CASE = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors _SCREAMING_SNAKE_CASE = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) _SCREAMING_SNAKE_CASE = reduce_by_error(errors) _SCREAMING_SNAKE_CASE = reduce_by_model(errors) _SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error) _SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(sa) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(sa)
18
'''simple docstring'''
import sys
from collections import defaultdict


class Heap:
    """Array-backed min-heap keyed by tentative distance, with an auxiliary
    `node_position` table so a vertex's heap slot is found in O(1) for
    decrease-key.

    Fix: the obfuscated source named this class `A` and every method
    `__SCREAMING_SNAKE_CASE` (so later defs silently overwrote earlier ones),
    while all use sites say `Heap` / `get_position` / `set_position` /
    `top_to_bottom` / `bottom_to_top` / `heapify` / `delete_minimum`.
    The names are restored from those call sites.
    """

    def __init__(self):
        # node_position[v] == current index of vertex v inside the heap array.
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the entry at index `start` down until the heap property holds."""
        if start > size // 2 - 1:
            return  # `start` is a leaf
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1  # only a left child exists
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap keys and vertex ids, then fix the position table.
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift the value `val` (at `index`) up toward the root (decrease-key)."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                # Shift the parent down one level.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                # Found the insertion slot.
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: val belongs at index 0.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a heap in place from the last internal node up to the root."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest key."""
        temp = positions[0]
        heap[0] = sys.maxsize  # sentinel: sinks to the bottom
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Prim's minimum-spanning-tree algorithm.

    :param adjacency_list: mapping vertex -> list of ``[neighbor, weight]``
        pairs; vertices are the integers ``0 .. len(adjacency_list) - 1``.
    :return: list of MST edges as ``(tree_vertex, new_vertex)`` tuples, in
        the order the vertices were absorbed (starting from vertex 0).

    Fix: the obfuscated source defined this as ``A`` while the ``__main__``
    block called ``prisms_algorithm``; the alias ``A`` below keeps the old
    name working.
    """
    heap = Heap()
    visited = [0] * len(adjacency_list)
    # Neighboring tree vertex of each selected vertex.
    nbr_tv = [-1] * len(adjacency_list)
    # Tentative distance of each vertex from the partial tree (heap keys).
    distance_tv = []
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


# Backward-compatible alias for the obfuscated name.
A = prisms_algorithm


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
48
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _a = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ["""GPTSw3Tokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
19
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model") if is_sentencepiece_available(): import sentencepiece as sp UpperCAmelCase__ : Tuple = 5 UpperCAmelCase__ : List[Any] = 10 @require_sentencepiece @require_tokenizers class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :Tuple = SpeechaTextTokenizer snake_case__ :Dict = False snake_case__ :Optional[int] = True def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" super().setUp() lowerCAmelCase__ = sp.SentencePieceProcessor() spm_model.Load(__magic_name__ ) lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )] lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) lowerCAmelCase__ = Path(self.tmpdirname ) save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = "<pad>" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): 
"""simple docstring""" lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(__magic_name__ ) , 1001 ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1001 ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = tokenizer.tokenize("This is a test" ) self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , ) lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 
797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , ) @require_sentencepiece class A ( unittest.TestCase ): snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium' snake_case__ :Tuple 
= 'C\'est trop cool' snake_case__ :List[str] = 'Esto es genial' @classmethod def __SCREAMING_SNAKE_CASE ( cls : List[Any] ): """simple docstring""" lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 10000 ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" self.assertIn(__magic_name__ , self.tokenizer.all_special_ids ) lowerCAmelCase__ = [ES_CODE, 4, 1601, 47, 7647, 2] lowerCAmelCase__ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertNotIn(self.tokenizer.eos_token , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = "fr" lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , __magic_name__ ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = "fr" self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) lowerCAmelCase__ = "es" self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
48
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCAmelCase: Optional[int] = logging.get_logger(__name__) class lowercase_ (lowercase__ , lowercase__ ): snake_case ='maskformer-swin' snake_case ={ 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , lowercase_=224 , lowercase_=4 , lowercase_=3 , lowercase_=96 , lowercase_=[2, 2, 6, 2] , lowercase_=[3, 6, 12, 24] , lowercase_=7 , lowercase_=4.0 , lowercase_=True , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_="gelu" , lowercase_=False , lowercase_=0.02 , lowercase_=1e-5 , lowercase_=None , lowercase_=None , **lowercase_ , ) -> Union[str, Any]: super().__init__(**lowercase_) a__ =image_size a__ =patch_size a__ =num_channels a__ =embed_dim a__ =depths a__ =len(lowercase_) a__ =num_heads a__ =window_size a__ =mlp_ratio a__ =qkv_bias a__ =hidden_dropout_prob a__ =attention_probs_dropout_prob a__ =drop_path_rate a__ =hidden_act a__ =use_absolute_embeddings a__ =layer_norm_eps a__ =initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model a__ =int(embed_dim * 2 ** (len(lowercase_) - 1)) a__ =['stem'] + [F"""stage{idx}""" for idx in range(1 , len(lowercase_) + 1)] a__ , a__ =get_aligned_output_features_output_indices( out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names)
20
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCAmelCase__ : Tuple = logging.get_logger(__name__) # General docstring UpperCAmelCase__ : int = "RegNetConfig" # Base docstring UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040" UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7] # Image classification docstring UpperCAmelCase__ : Tuple = "facebook/regnet-y-040" UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat" UpperCAmelCase__ : int = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class A ( tf.keras.layers.Layer ): def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ): """simple docstring""" super().__init__(**__magic_name__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) lowerCAmelCase__ = tf.keras.layers.ConvaD( filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , ) lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) lowerCAmelCase__ = ACTaFN[activation] if 
activation is not None else tf.identity def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) ) lowerCAmelCase__ = self.normalization(__magic_name__ ) lowerCAmelCase__ = self.activation(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = config.num_channels lowerCAmelCase__ = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ): """simple docstring""" lowerCAmelCase__ = shape_list(__magic_name__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) ) lowerCAmelCase__ = self.embedder(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = tf.keras.layers.ConvaD( filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" ) lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ): """simple docstring""" return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ ) class A ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" ) lowerCAmelCase__ = [ tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.pooler(__magic_name__ ) for layer_module in self.attention: lowerCAmelCase__ = layer_module(__magic_name__ ) lowerCAmelCase__ = hidden_state * pooled return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ): """simple docstring""" super().__init__(**__magic_name__ ) 
lowerCAmelCase__ = in_channels != out_channels or stride != 1 lowerCAmelCase__ = max(1 , out_channels // config.groups_width ) lowerCAmelCase__ = ( TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. lowerCAmelCase__ = [ TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( __magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ), ] lowerCAmelCase__ = ACTaFN[config.hidden_act] def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ): """simple docstring""" lowerCAmelCase__ = hidden_state for layer_module in self.layers: lowerCAmelCase__ = layer_module(__magic_name__ ) lowerCAmelCase__ = self.shortcut(__magic_name__ ) hidden_state += residual lowerCAmelCase__ = self.activation(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = in_channels != out_channels or stride != 1 lowerCAmelCase__ = max(1 , out_channels // config.groups_width ) lowerCAmelCase__ = ( TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) lowerCAmelCase__ = [ TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( __magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(__magic_name__ , 
reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ), ] lowerCAmelCase__ = ACTaFN[config.hidden_act] def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ): """simple docstring""" lowerCAmelCase__ = hidden_state for layer_module in self.layers: lowerCAmelCase__ = layer_module(__magic_name__ ) lowerCAmelCase__ = self.shortcut(__magic_name__ ) hidden_state += residual lowerCAmelCase__ = self.activation(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer lowerCAmelCase__ = [ # downsampling is done in the first layer with stride of 2 layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ), *[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ): """simple docstring""" for layer_module in self.layers: lowerCAmelCase__ = layer_module(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) lowerCAmelCase__ = 
zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) ) def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ): """simple docstring""" lowerCAmelCase__ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowerCAmelCase__ = hidden_states + (hidden_state,) lowerCAmelCase__ = stage_module(__magic_name__ ) if output_hidden_states: lowerCAmelCase__ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ ) @keras_serializable class A ( tf.keras.layers.Layer ): snake_case__ :List[Any] = RegNetConfig def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = config lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" ) lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" ) lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" ) @unpack_inputs def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ): """simple docstring""" lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ ) lowerCAmelCase__ = 
self.encoder( __magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ ) lowerCAmelCase__ = encoder_outputs[0] lowerCAmelCase__ = self.pooler(__magic_name__ ) # Change to NCHW output format have uniformity in the modules lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :str = RegNetConfig snake_case__ :Optional[Any] = 'regnet' snake_case__ :Tuple = 'pixel_values' @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. 
Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ): """simple docstring""" super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ ) lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(__magic_name__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ): """simple docstring""" lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.regnet( pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head 
on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ): """simple docstring""" super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ ) lowerCAmelCase__ = config.num_labels lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" ) # classification head lowerCAmelCase__ = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__magic_name__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ): """simple docstring""" lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.regnet( __magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ ) lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1] lowerCAmelCase__ = self.classifier[0](__magic_name__ ) lowerCAmelCase__ = self.classifier[1](__magic_name__ ) lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ ) if not return_dict: lowerCAmelCase__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ 
, hidden_states=outputs.hidden_states )
48
0
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def lowerCAmelCase_ ( lowerCamelCase ): return (data["data"], data["target"]) def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ): __magic_name__ : Optional[Any] =XGBClassifier() classifier.fit(lowerCamelCase , lowerCamelCase ) return classifier def lowerCAmelCase_ ( ): __magic_name__ : Dict =load_iris() __magic_name__ , __magic_name__ : Union[str, Any] =data_handling(lowerCamelCase ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] =train_test_split( lowerCamelCase , lowerCamelCase , test_size=0.2_5 ) __magic_name__ : Any =iris["""target_names"""] # Create an XGBoost Classifier from the training data __magic_name__ : Union[str, Any] =xgboost(lowerCamelCase , lowerCamelCase ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( lowerCamelCase , lowerCamelCase , lowerCamelCase , display_labels=lowerCamelCase , cmap="""Blues""" , normalize="""true""" , ) plt.title("""Normalized Confusion Matrix - IRIS Dataset""" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
21
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def A ( UpperCamelCase_ : Tuple ) -> int: '''simple docstring''' for param in module.parameters(): lowerCAmelCase__ = False def A ( ) -> Tuple: '''simple docstring''' lowerCAmelCase__ = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowerCAmelCase__ = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase__ = plt.imshow(UpperCamelCase_ ) fig.axes.get_xaxis().set_visible(UpperCamelCase_ ) fig.axes.get_yaxis().set_visible(UpperCamelCase_ ) plt.show() def A ( ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase__ = datetime.now() lowerCAmelCase__ = current_time.strftime("%H:%M:%S" ) return timestamp
48
0
'''simple docstring''' def snake_case_ (UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : List[Any]=False ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ): _a = len(set_a.intersection(UpperCamelCase ) ) if alternative_union: _a = len(UpperCamelCase ) + len(UpperCamelCase ) else: _a = len(set_a.union(UpperCamelCase ) ) return intersection / union if isinstance(UpperCamelCase , (list, tuple) ) and isinstance(UpperCamelCase , (list, tuple) ): _a = [element for element in set_a if element in set_b] if alternative_union: _a = len(UpperCamelCase ) + len(UpperCamelCase ) return len(UpperCamelCase ) / union else: _a = set_a + [element for element in set_b if element not in set_a] return len(UpperCamelCase ) / len(UpperCamelCase ) return len(UpperCamelCase ) / len(UpperCamelCase ) return None if __name__ == "__main__": _snake_case : Dict = {'a', 'b', 'c', 'd', 'e'} _snake_case : Dict = {'c', 'd', 'e', 'f', 'h', 'i'} print(jaccard_similarity(set_a, set_b))
22
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase__ : List[Any] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Union[str, Any] = ["EncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[int] = ["TFEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[Any] = ["FlaxEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
48
0
"""Lazy import structure for the HerBERT tokenizers."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available


# Maps submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are lazy.
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
23
"""Convert a TensorFlow BigBird checkpoint to a PyTorch model."""
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Build a BigBird model from a config, load TF weights into it, and save it.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        big_bird_config_file: JSON config describing the model architecture.
        pytorch_dump_path: output directory for the converted PyTorch model.
        is_trivia_qa: if True, build a question-answering head instead of the
            pre-training head (TriviaQA checkpoints).
    """
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
48
0
"""Tarjan's algorithm for strongly connected components of a directed graph."""
from collections import deque


def tarjan(g):
    """Return the strongly connected components of the adjacency-list graph ``g``.

    Components are emitted in reverse topological order of the condensation
    (a component appears only after every component it can reach).

    Args:
        g: adjacency list, ``g[v]`` is the list of successors of vertex ``v``;
            vertices are ``0 .. len(g) - 1``.

    Returns:
        A list of components, each a list of vertex indices.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]  # -1 marks "not yet visited"
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
            elif on_stack[w]:
                # Back edge into the current DFS stack.
                lowlink_of[v] = lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]

        if lowlink_of[v] == index_of[v]:
            # v is the root of an SCC: pop the whole component off the stack.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n_vertices, edges):
    """Build an adjacency list for ``n_vertices`` vertices from ``(u, v)`` edge pairs."""
    g = [[] for _ in range(n_vertices)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
24
'''simple docstring''' from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class A : def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ): """simple docstring""" lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = seq_length lowerCAmelCase__ = is_training lowerCAmelCase__ = use_input_mask lowerCAmelCase__ = use_token_type_ids lowerCAmelCase__ = use_labels lowerCAmelCase__ = vocab_size lowerCAmelCase__ = block_sizes lowerCAmelCase__ = num_decoder_layers lowerCAmelCase__ = d_model lowerCAmelCase__ = n_head lowerCAmelCase__ = d_head lowerCAmelCase__ = d_inner lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout lowerCAmelCase__ = 
attention_dropout lowerCAmelCase__ = activation_dropout lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = type_vocab_size lowerCAmelCase__ = 2 lowerCAmelCase__ = num_labels lowerCAmelCase__ = num_choices lowerCAmelCase__ = scope lowerCAmelCase__ = initializer_std # Used in the tests to check the size of the first attention layer lowerCAmelCase__ = n_head # Used in the tests to check the size of the first hidden state lowerCAmelCase__ = self.d_model # Used in the tests to check the number of output hidden states/attentions lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: lowerCAmelCase__ = self.num_hidden_layers + 2 def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ = None if self.use_input_mask: lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ = None if self.use_token_type_ids: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , 
activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ): """simple docstring""" lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = [input_ids, input_mask] lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ): """simple docstring""" lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = [input_ids, input_mask] 
lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ): 
"""simple docstring""" lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = self.num_choices lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ): """simple docstring""" lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , 
__magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) , ) = config_and_inputs lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :int = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) snake_case__ :Any = ( { 'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) snake_case__ :str = False snake_case__ :Any = False def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = TFFunnelModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" 
self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @require_tf class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :Any = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) snake_case__ :int = False snake_case__ :List[Any] = False def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ ) lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
48
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    """Configuration for a RoFormer model (BERT-like encoder with rotary embeddings).

    All arguments mirror standard BERT-style hyperparameters; ``rotary_value``
    additionally applies the rotary position embedding to the value vectors.
    """

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # Token-embedding width defaults to the hidden width unless overridden.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoFormer."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
25
"""UMT5 model configuration."""
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    """Configuration for a UMT5 (multilingual T5) encoder-decoder model."""

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # Parse "<gated->ACT" into activation name + gating flag.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # Backwards compatibility: the historical "gated-gelu" means gelu_new.
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for UMT5 (seq2seq with optional past key values)."""

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
48
0
"""Lazy import structure for the (deprecated) M-CTC-T model."""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it provides; consumed by _LazyModule below.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they are lazy.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
26
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class A : def __init__( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = {} def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = {} def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ): """simple docstring""" if nodea not in self.connections: self.add_node(__magic_name__ ) if nodea not in self.connections: self.add_node(__magic_name__ ) lowerCAmelCase__ = probability def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" return list(self.connections ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]: '''simple docstring''' lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = Counter(graph.get_nodes() ) lowerCAmelCase__ = start for _ in range(UpperCamelCase_ ): lowerCAmelCase__ = graph.transition(UpperCamelCase_ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
48
0
"""Lazy-import bootstrap for the Blenderbot model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> public names it exports. Optional-backend entries
# are appended below only when their dependency is installed.
# BUG FIXED: the dict and every backend list were bound to throwaway
# names, and the undefined name `_import_structure` was passed to
# `_LazyModule`, raising NameError on import.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so submodules are imported on first access
    # (previously the proxy was bound to a local name and never used).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
27
"""Integration harness that runs every local metric's doctests.

NOTE(review): identifiers in this chunk are machine-mangled — most
assignment targets were rewritten to ``UpperCAmelCase__`` /
``lowerCAmelCase__`` (each rebinding clobbers the previous value) and
every helper is named ``A``.  As a result many names referenced below
(``_has_fairseq``, ``REQUIRE_FAIRSEQ``, ``metric_name``, ``test_case``,
``metrics``, ``get_local_metric_names``, ``SCREAMING_SNAKE_CASE__``,
``parameters``, ``metric_module``, ``results`` ...) are undefined as
written, and several ``def`` signatures repeat a parameter name, which
is a SyntaxError.  Comments describe the apparent intent; the real
names must be restored before this module can run.
"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch

import numpy as np
import pytest
from absl.testing import parameterized

import datasets
from datasets import load_metric

from .utils import for_all_test_methods, local, slow  # noqa: F401


# mark all tests as integration
# NOTE(review): the seven module constants below all rebind one mangled
# name; originally these were distinct (pytest marker, REQUIRE_FAIRSEQ,
# _has_fairseq, REQUIRE_TRANSFORMERS, _on_windows,
# UNSUPPORTED_ON_WINDOWS, _has_transformers) — verify against upstream.
UpperCAmelCase__ = pytest.mark.integration
UpperCAmelCase__ = {"comet"}
UpperCAmelCase__ = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase__ = {"code_eval"}
UpperCAmelCase__ = os.name == "nt"
UpperCAmelCase__ = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase__ = importlib.util.find_spec("transformers") is not None


def A(UpperCamelCase_):
    """Decorator: skip the wrapped test when the metric requires fairseq and it is absent."""

    @wraps(UpperCamelCase_)
    def wrapper(self, UpperCamelCase_):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest("\"test requires Fairseq\"")
        else:
            test_case(self, UpperCamelCase_)

    return wrapper


def A(UpperCamelCase_):
    """Decorator: skip the wrapped test when the metric requires transformers and it is absent."""

    @wraps(UpperCamelCase_)
    def wrapper(self, UpperCamelCase_):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest("\"test requires transformers\"")
        else:
            test_case(self, UpperCamelCase_)

    return wrapper


def A(UpperCamelCase_):
    """Decorator: skip the wrapped test for metrics that do not run on Windows."""

    @wraps(UpperCamelCase_)
    def wrapper(self, UpperCamelCase_):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest("\"test not supported on Windows\"")
        else:
            test_case(self, UpperCamelCase_)

    return wrapper


def A():
    """Collect every metric directory name for parameterized test generation."""
    # Each glob hit looks like "./metrics/<name>/"; [-2] extracts <name>.
    lowerCAmelCase__ = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
@local
class A(parameterized.TestCase):
    """Runs each local metric's module doctests, patching out expensive model calls."""

    # NOTE(review): both class attributes rebind one mangled name;
    # originally INTENSIVE_CALLS_PATCHER = {} and metric_name = None.
    snake_case__ = {}
    snake_case__ = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def __SCREAMING_SNAKE_CASE(self, __magic_name__):
        """Load a metric module, check its signature, and run its doctests."""
        lowerCAmelCase__ = "[...]"
        lowerCAmelCase__ = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", __magic_name__)).module_path
        )
        lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__, dataset=__magic_name__)
        # check parameters
        lowerCAmelCase__ = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(__magic_name__, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    lowerCAmelCase__ = doctest.testmod(__magic_name__, verbose=__magic_name__, raise_on_error=__magic_name__)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def __SCREAMING_SNAKE_CASE(self, __magic_name__):
        """Slow variant: run the metric doctests without patching model calls."""
        lowerCAmelCase__ = "[...]"
        lowerCAmelCase__ = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", __magic_name__)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            lowerCAmelCase__ = doctest.testmod(__magic_name__, verbose=__magic_name__, raise_on_error=__magic_name__)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    # NOTE(review): duplicate parameter name below is a SyntaxError
    # introduced by the mangling; the second parameter was originally
    # distinct (the metric name vs. the module name).
    @contextmanager
    def __SCREAMING_SNAKE_CASE(self, __magic_name__, __magic_name__):
        """Apply the registered expensive-call patcher for this metric, if any."""
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__):
                yield
        else:
            yield

    @contextmanager
    def __SCREAMING_SNAKE_CASE(self):
        """Redirect `datasets.load_metric` to load from the local ./metrics tree."""

        # NOTE(review): duplicate parameter names below (SyntaxError from
        # mangling); originally (metric_name, *args, **kwargs).
        def load_local_metric(__magic_name__, *__magic_name__, **__magic_name__):
            return load_metric(os.path.join("metrics", __magic_name__), *__magic_name__, **__magic_name__)

        with patch("datasets.load_metric") as mock_load_metric:
            lowerCAmelCase__ = load_local_metric
            yield

    @classmethod
    def __SCREAMING_SNAKE_CASE(cls, __magic_name__):
        """Class decorator factory: register a context-manager patcher for a metric name."""

        def wrapper(__magic_name__):
            lowerCAmelCase__ = contextmanager(__magic_name__)
            lowerCAmelCase__ = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def A(UpperCamelCase_):
    """Patch bleurt's predictor so no TensorFlow model is downloaded or run."""
    # NOTE(review): "tensorflow.compat.va" looks mangled from
    # "tensorflow.compat.v2" — confirm before use.
    import tensorflow.compat.va as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class A(SCREAMING_SNAKE_CASE__):
        def __SCREAMING_SNAKE_CASE(self, __magic_name__):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        lowerCAmelCase__ = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def A(UpperCamelCase_):
    """Patch bert_score so no BERT model is downloaded or run."""
    import torch

    # NOTE(review): duplicate parameter names below (SyntaxError from mangling).
    def bert_cos_score_idf(UpperCamelCase_, UpperCamelCase_, *UpperCamelCase_, **UpperCamelCase_):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        lowerCAmelCase__ = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def A(UpperCamelCase_):
    """Patch comet's model download/load so no checkpoint is fetched."""

    def load_from_checkpoint(UpperCamelCase_):
        class A:
            # NOTE(review): duplicate parameter names below (SyntaxError from mangling).
            def __SCREAMING_SNAKE_CASE(self, __magic_name__, *__magic_name__, **__magic_name__):
                assert len(__magic_name__) == 2
                lowerCAmelCase__ = [0.19, 0.92]
                return scores, sum(__magic_name__) / len(__magic_name__)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model") as mock_download_model:
        lowerCAmelCase__ = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            lowerCAmelCase__ = load_from_checkpoint
            yield


def A():
    """seqeval should raise for an unknown tagging scheme with a descriptive message."""
    lowerCAmelCase__ = load_metric(os.path.join("metrics", "seqeval"))
    lowerCAmelCase__ = "ERROR"
    lowerCAmelCase__ = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
    with pytest.raises(UpperCamelCase_, match=re.escape(UpperCamelCase_)):
        metric.compute(predictions=[], references=[], scheme=UpperCamelCase_)
48
0
"""Hub push/pull tests for Wav2Vec2 feature extractors.

NOTE(review): identifiers here are machine-mangled — every local was
rewritten to ``SCREAMING_SNAKE_CASE`` (so later statements that read
``feature_extractor`` / ``new_feature_extractor`` reference undefined
names), every method of both classes is named ``UpperCamelCase_`` (each
definition overwrites the previous one), and most call arguments were
replaced by the bare name ``A`` (also undefined).  Comments describe
the apparent intent; the real names must be restored before running.
"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test

sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402

# Directory containing test fixture files.
UpperCamelCase_ = get_tests_dir("fixtures")


class _a(unittest.TestCase):
    """Offline-mode behaviour of `from_pretrained` under simulated network errors."""

    def UpperCamelCase_(self):
        """A cached model should still load when the Hub returns HTTP 500."""
        # Build a fake response object that raises HTTPError with status 500.
        SCREAMING_SNAKE_CASE = mock.Mock()
        SCREAMING_SNAKE_CASE = 500
        SCREAMING_SNAKE_CASE = {}
        SCREAMING_SNAKE_CASE = HTTPError
        SCREAMING_SNAKE_CASE = {}
        # Download this model to make sure it's in the cache.
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=A) as mock_head:
            SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2')
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCamelCase_(self):
        """Loading directly from a resolved config URL should work."""
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json'
        )


@is_staging_test
class _a(unittest.TestCase):
    """Round-trip push_to_hub / from_pretrained tests against the staging Hub."""

    @classmethod
    def UpperCamelCase_(cls):
        """Authenticate against the staging Hub once for the class."""
        SCREAMING_SNAKE_CASE = TOKEN
        HfFolder.save_token(A)

    @classmethod
    def UpperCamelCase_(cls):
        """Best-effort cleanup of the repos this class may have created."""
        try:
            delete_repo(token=cls._token, repo_id='test-feature-extractor')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-feature-extractor-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-feature-extractor')
        except HTTPError:
            pass

    def UpperCamelCase_(self):
        """Push to a user namespace via push_to_hub and via save_pretrained; reload and compare."""
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(A)
        feature_extractor.push_to_hub('test-feature-extractor', use_auth_token=self._token)
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A, getattr(A, A))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-feature-extractor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                A, repo_id='test-feature-extractor', push_to_hub=A, use_auth_token=self._token
            )
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A, getattr(A, A))

    def UpperCamelCase_(self):
        """Same round-trip as above but into an organization namespace."""
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(A)
        feature_extractor.push_to_hub('valid_org/test-feature-extractor', use_auth_token=self._token)
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A, getattr(A, A))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-feature-extractor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                A, repo_id='valid_org/test-feature-extractor-org', push_to_hub=A, use_auth_token=self._token
            )
        SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org')
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(A, getattr(A, A))

    def UpperCamelCase_(self):
        """A custom (dynamic-module) feature extractor keeps its auto_map when pushed."""
        CustomFeatureExtractor.register_for_auto_class()
        SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(A)
        feature_extractor.push_to_hub('test-dynamic-feature-extractor', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'},
        )
        SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
            F"{USER}/test-dynamic-feature-extractor", trust_remote_code=A
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, 'CustomFeatureExtractor')
28
"""NLLB-200 translation tool: language-name -> FLORES-200 code table plus the tool class.

NOTE(review): identifiers are machine-mangled — the code table is bound
to ``UpperCAmelCase__`` while the class reads the undefined name
``LANGUAGE_CODES``; all class attributes rebind one name
(``snake_case__``), all three methods share one name (each overwrites
the previous), and the first method's signature repeats
``__magic_name__`` three times, which is a SyntaxError.  Comments
describe the apparent intent; restore the real names before use.
"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool

# Plain-English language name -> FLORES-200 language/script code.
# (Keys are runtime data — note the intentional-looking trailing space
# in "Minangkabau Arabic ".)
UpperCAmelCase__ = {
    "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab",
    "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn",
    "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi",
    "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab",
    "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab",
    "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng",
    "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn",
    "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl",
    "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn",
    "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab",
    "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn",
    "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn",
    "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab",
    "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn",
    "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", "Dzongkha": "dzo_Tibt",
    "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn",
    "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn",
    "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn",
    "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn",
    "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr",
    "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva",
    "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn",
    "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn",
    "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan",
    "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda",
    "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor",
    "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl",
    "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn",
    "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn",
    "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang",
    "Lao": "lao_Laoo", "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn",
    "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn",
    "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", "Ganda": "lug_Latn", "Luo": "luo_Latn",
    "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva",
    "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva",
    "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl",
    "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng",
    "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": "mya_Mymr",
    "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn",
    "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn",
    "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya",
    "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn",
    "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn",
    "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn",
    "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn",
    "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr",
    "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn",
    "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn",
    "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn",
    "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn",
    "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl",
    "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai",
    "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng",
    "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn",
    "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn",
    "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl",
    "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn",
    "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn",
    "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn",
    "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant",
    "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn",
}


class A(SCREAMING_SNAKE_CASE__):
    """Agent tool wrapping NLLB-200 for text translation.

    NOTE(review): the base class name is mangled — presumably
    ``PipelineTool`` (imported above); the class attributes below were
    originally distinct (default_checkpoint, description, name,
    pre_processor_class, model_class, lang_to_code, inputs, outputs).
    """

    snake_case__ = 'facebook/nllb-200-distilled-600M'
    snake_case__ = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    snake_case__ = 'translator'
    snake_case__ = AutoTokenizer
    snake_case__ = AutoModelForSeqaSeqLM
    snake_case__ = LANGUAGE_CODES
    snake_case__ = ['text', 'text', 'text']
    snake_case__ = ['text']

    # NOTE(review): duplicate parameter names below are a SyntaxError
    # from the mangling; originally (self, text, src_lang, tgt_lang).
    def __SCREAMING_SNAKE_CASE(self, __magic_name__, __magic_name__, __magic_name__):
        """Validate the language names and tokenize the text for translation."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f"""{src_lang} is not a supported language.""")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"""{tgt_lang} is not a supported language.""")
        lowerCAmelCase__ = self.lang_to_code[src_lang]
        lowerCAmelCase__ = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            __magic_name__, return_tensors="pt", src_lang=__magic_name__, tgt_lang=__magic_name__
        )

    def __SCREAMING_SNAKE_CASE(self, __magic_name__):
        """Run generation on the tokenized inputs."""
        return self.model.generate(**__magic_name__)

    def __SCREAMING_SNAKE_CASE(self, __magic_name__):
        """Decode the generated ids back to text."""
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=__magic_name__)
48
0
"""simple docstring""" import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __lowerCamelCase ( lowerCAmelCase , unittest.TestCase ): a__: Tuple = CanineTokenizer a__: Tuple = False def UpperCAmelCase__ ( self ): super().setUp() lowerCamelCase_ = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase__ ( self ): return CanineTokenizer.from_pretrained('''google/canine-s''' ) def UpperCAmelCase__ ( self , **UpperCAmelCase ): lowerCamelCase_ = self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase ) lowerCamelCase_ = 1024 return tokenizer @require_torch def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.canine_tokenizer lowerCamelCase_ = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.'''] # fmt: off lowerCamelCase_ = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0] # fmt: on lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase_ = list(batch.input_ids.numpy()[0] ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.canine_tokenizer lowerCamelCase_ = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.'''] lowerCamelCase_ = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , return_tensors='''pt''' ) # check if input_ids, 
attention_mask and token_type_ids are returned self.assertIn('''input_ids''' , UpperCAmelCase ) self.assertIn('''attention_mask''' , UpperCAmelCase ) self.assertIn('''token_type_ids''' , UpperCAmelCase ) @require_torch def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.canine_tokenizer lowerCamelCase_ = [ '''What\'s the weater?''', '''It\'s about 25 degrees.''', ] lowerCamelCase_ = tokenizer( text_target=UpperCAmelCase , max_length=32 , padding='''max_length''' , truncation=UpperCAmelCase , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def UpperCAmelCase__ ( self ): # safety check on max_len default value so we are sure the test works lowerCamelCase_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test lowerCamelCase_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running''' lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) tokenizer.save_pretrained(UpperCAmelCase ) lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase ) lowerCamelCase_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) shutil.rmtree(UpperCAmelCase ) lowerCamelCase_ = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): # Isolate this from the other tests because we save additional tokens/etc lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = ''' He is very happy, UNwant\u00E9d,running''' lowerCamelCase_ = tokenizer.additional_special_tokens # We can add a new special token for Canine as 
follows: lowerCamelCase_ = chr(0Xe_007 ) additional_special_tokens.append(UpperCAmelCase ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) tokenizer.save_pretrained(UpperCAmelCase ) lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase ) lowerCamelCase_ = after_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) self.assertIn(UpperCAmelCase , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) lowerCamelCase_ = tokenizer.__class__.from_pretrained(UpperCAmelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCAmelCase ) def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): lowerCamelCase_ , lowerCamelCase_ = self.get_clean_sequence(UpperCAmelCase ) # a special token for Canine can be defined as follows: lowerCamelCase_ = 0Xe_005 lowerCamelCase_ = chr(UpperCAmelCase ) tokenizer.add_special_tokens({'''cls_token''': special_token} ) lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(len(UpperCAmelCase ) , 1 ) lowerCamelCase_ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=UpperCAmelCase ) lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) lowerCamelCase_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(UpperCAmelCase , input_encoded + special_token_id ) lowerCamelCase_ = tokenizer.decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) self.assertTrue(special_token not in decoded ) 
def test_tokenize_special_tokens(self):
    """Tokens added via `add_tokens` and via `add_special_tokens` must each tokenize to themselves.

    NOTE(review): an automated rewrite had given every method in this region the
    same name (`UpperCAmelCase__`, so later defs shadowed earlier ones) and replaced
    variables/arguments with the undefined name `UpperCAmelCase`.  Names and literal
    arguments below are reconstructed from the upstream Hugging Face Canine
    tokenizer tests -- TODO confirm against the original file.
    """
    tokenizers = self.get_tokenizers(do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            SPECIAL_TOKEN_1 = chr(0xE005)
            SPECIAL_TOKEN_2 = chr(0xE006)
            # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
            tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
            # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
            # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
            tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})
            token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
            token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
            # each special token must come back as exactly one token, unchanged
            self.assertEqual(len(token_1), 1)
            self.assertEqual(len(token_2), 1)
            self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
            self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

@require_tokenizers
def test_added_token_serializable(self):
    """An `AddedToken` special token must survive a save/load round-trip."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            # a special token for Canine can be defined as follows:
            NEW_TOKEN = 0xE006
            new_token = AddedToken(chr(NEW_TOKEN), lstrip=True)
            tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
            with tempfile.TemporaryDirectory() as tmp_dir_name:
                tokenizer.save_pretrained(tmp_dir_name)
                tokenizer.from_pretrained(tmp_dir_name)

def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
    """Additional special tokens written into special_tokens_map.json /
    tokenizer_config.json must be honoured by `from_pretrained`, and must be
    overridable via its `additional_special_tokens` argument."""
    tokenizer_list = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(tmp_dir)
            with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                special_tokens_map = json.load(json_file)
            with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                tokenizer_config = json.load(json_file)
            # a special token for Canine can be defined as follows:
            NEW_TOKEN = 0xE006
            new_token_1 = chr(NEW_TOKEN)
            special_tokens_map["additional_special_tokens"] = [new_token_1]
            tokenizer_config["additional_special_tokens"] = [new_token_1]
            with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                json.dump(special_tokens_map, outfile)
            with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                json.dump(tokenizer_config, outfile)
            # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
            # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
            # "special_tokens_map.json" files
            tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
            self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
            # self.assertIn("an_additional_special_token", tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
            self.assertEqual(
                [new_token_1],
                tokenizer_without_change_in_init.convert_ids_to_tokens(
                    tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                ),
            )
            NEW_TOKEN = 0xE007
            new_token_2 = chr(NEW_TOKEN)
            # Now we test that we can change the value of additional_special_tokens in the from_pretrained
            new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
            tokenizer = tokenizer_class.from_pretrained(
                tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
            )
            self.assertIn(new_token_2, tokenizer.additional_special_tokens)
            # self.assertIn(new_token_2, tokenizer.get_vocab()) # ByT5Tokenization no vocab
            self.assertEqual(
                [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
            )

@require_tokenizers
def test_encode_decode_with_spaces(self):
    """decode() must honour the class-level `space_between_special_tokens` flag."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            input = "hello world"
            if self.space_between_special_tokens:
                output = "[CLS] hello world [SEP]"
            else:
                output = input
            encoded = tokenizer.encode(input, add_special_tokens=False)
            decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
            self.assertIn(decoded, [output, output.lower()])

def test_tokenizers_common_ids_setters(self):
    """Setting `<attr>_id` must update the corresponding `<attr>` string (and back)."""
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"{tokenizer.__class__.__name__}"):
            attributes_list = [
                "bos_token",
                "eos_token",
                "unk_token",
                "sep_token",
                "pad_token",
                "cls_token",
                "mask_token",
            ]
            token_to_test_setters = "a"
            token_id_to_test_setters = ord(token_to_test_setters)
            for attr in attributes_list:
                # first clear the id, then set it to a concrete char code
                setattr(tokenizer, attr + "_id", None)
                self.assertEqual(getattr(tokenizer, attr), None)
                self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
            setattr(tokenizer, "additional_special_tokens_ids", [])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
            additional_special_token_id = 0xE006
            additional_special_token = chr(additional_special_token_id)
            setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
            self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])

# The remaining common-suite tests do not apply to a character-level tokenizer
# (no vocabulary file, inputs cannot be pretokenized), so they are no-ops.
# NOTE(review): the obfuscated original had eight identically named no-op methods
# here; the names below are reconstructed from upstream -- TODO confirm.
def test_get_vocab(self):
    pass

def test_pretokenized_inputs(self):
    pass

def test_conversion_reversible(self):
    pass

def test_convert_tokens_to_string_format(self):
    pass

def test_padding_different_model_input_name(self):
    pass

def test_special_tokens_map_equal(self):
    pass

def test_np_encode_plus_sent_to_model(self):
    pass

def test_torch_encode_plus_sent_to_model(self):
    pass
29
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# module-level logger (obfuscated name kept so any external reference still resolves)
UpperCAmelCase__ = logging.get_logger(__name__)


class A(SCREAMING_SNAKE_CASE__):
    """Configuration for a timm backbone exposed through the HF backbone API.

    NOTE(review): an automated rewrite had renamed every ``__init__`` parameter
    to ``__magic_name__`` (duplicate argument names -- a SyntaxError) and
    collapsed all ``self.<attr>`` targets to ``lowerCAmelCase__``.  Parameter
    and attribute names below are recovered from the right-hand sides of the
    original assignments.
    """

    # model_type identifier (obfuscated attribute name kept for compatibility)
    snake_case__ = 'timm_backbone'

    def __init__(
        self,
        backbone=None,               # name of the timm model to instantiate
        num_channels=3,              # number of input image channels
        features_only=True,          # timm flag: return feature maps, not logits
        use_pretrained_backbone=True,
        out_indices=None,            # which feature stages to expose
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # presumably marks the config as timm-backed; the obfuscated original
        # only shows a bare `= True` assignment -- TODO confirm attribute name
        self.use_timm_backbone = True
        # default to the last feature stage only
        self.out_indices = out_indices if out_indices is not None else (-1,)
48
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the obfuscated original named the logger, this map AND the class
# below all `__a` (so only the class survived).  The class keeps that name for
# compatibility; the map gets a conventional, distinct name.
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/s2t-small-librispeech-asr': (
        'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class __a(_a):
    """Configuration for Speech2Text encoder-decoder models.

    NOTE(review): every ``__init__`` parameter had been renamed to
    ``_SCREAMING_SNAKE_CASE`` (duplicate argument names -- a SyntaxError) and
    the three class attributes all to ``lowerCAmelCase``; names below are
    recovered from the body's assignments and the ``PretrainedConfig``
    conventions (``model_type``, ``keys_to_ignore_at_inference``,
    ``attribute_map``).
    """

    model_type = '''speech_to_text'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2_048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6_000,
        max_target_positions=1_024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1_024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # kept for backward compatibility with code reading `num_hidden_layers`
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        # one kernel size must be given per convolutional subsampling layer
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
                f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '''
                f'''`config.num_conv_layers = {self.num_conv_layers}`.'''
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
30
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class A(SCREAMING_SNAKE_CASE__):
    """Tool that generates an English caption for an input image using BLIP.

    NOTE(review): an automated rewrite had named all six class attributes
    ``snake_case__`` (so only the last survived), all three pipeline methods
    ``__SCREAMING_SNAKE_CASE``, and used ``__magic_name__`` for both ``*args``
    and ``**kwargs`` (a SyntaxError).  The names below follow the
    ``PipelineTool`` convention (encode/forward/decode) -- TODO confirm.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # the image processor requires the vision backend; fail early if absent
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Preprocess the input image into model-ready tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Run caption generation on the preprocessed inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Convert generated token ids back to a clean caption string."""
        # skip_special_tokens=True reconstructed from upstream -- TODO confirm
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
48
0
import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class lowerCamelCase_(_SCREAMING_SNAKE_CASE, unittest.TestCase):
    '''simple docstring'''

    # NOTE(review): an automated rewrite had named all five class attributes
    # ``lowercase_`` (so only the last survived -- breaking the
    # TokenizerTesterMixin contract) and every method ``lowerCAmelCase_``
    # (shadowing).  Attribute and method names below are the ones the mixin
    # requires; method bodies are reconstructed from the preserved string
    # literals and call structure -- TODO confirm against the original file.

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the test tmpdir."""
        super().setUp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        """Exact BPE split and id mapping on the toy vocabulary."""
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        """Slow (ftfy) and fast tokenizers must agree on tricky unicode inputs."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark):w
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
31
'''simple docstring'''

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


# NOTE(review): the obfuscated original named all six module constants
# ``UpperCAmelCase__`` (clobbering each other) while the class body references
# them as `logger`, `VOCAB_FILES_NAMES`, `FAIRSEQ_LANGUAGE_CODES`, etc. -- those
# in-class references fix the names restored below.
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 10_24,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class A(SCREAMING_SNAKE_CASE__):
    """SentencePiece-based MBart-50 tokenizer with fairseq-compatible id layout.

    NOTE(review): the obfuscated original gave every method the name
    ``__SCREAMING_SNAKE_CASE`` (shadowing -- the ``@src_lang.setter`` decorator
    even referenced a property name that no longer existed) and duplicated
    ``__magic_name__`` across all ``__init__`` parameters (a SyntaxError).
    Method, attribute and parameter names below are restored from the
    tokenizer-base-class contract and the body's own self-references.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        # lstrip=True/rstrip=False reconstructed from upstream -- TODO confirm.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # every fairseq language code becomes an additional special token
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        # the SentencePieceProcessor is not picklable; drop it and reload on setstate
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seqaseq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seqaseq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
48
0
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __UpperCamelCase : def __init__( self , _UpperCamelCase , _UpperCamelCase=2 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=10 , _UpperCamelCase=3 , _UpperCamelCase=32 * 4 , _UpperCamelCase=32 * 6 , _UpperCamelCase=4 , _UpperCamelCase=32 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = is_training _UpperCAmelCase = use_auxiliary_loss _UpperCAmelCase = num_queries _UpperCAmelCase = num_channels _UpperCAmelCase = min_size _UpperCAmelCase = max_size _UpperCAmelCase = num_labels _UpperCAmelCase = mask_feature_size def UpperCamelCase( self ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( _UpperCamelCase ) _UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCamelCase ) _UpperCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCamelCase ) > 0.5 ).float() _UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCamelCase ) > 0.5).long() _UpperCAmelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase( self ): return 
MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = output.encoder_hidden_states _UpperCAmelCase = output.pixel_decoder_hidden_states _UpperCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(_UpperCamelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCamelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(_UpperCamelCase ) , config.decoder_config.decoder_layers ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ): with torch.no_grad(): _UpperCAmelCase = MaskFormerModel(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() _UpperCAmelCase = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase ) _UpperCAmelCase = model(_UpperCamelCase , output_hidden_states=_UpperCamelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if 
output_hidden_states: self.check_output_hidden_state(_UpperCamelCase , _UpperCamelCase ) def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = MaskFormerForInstanceSegmentation(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() def comm_check_on_output(_UpperCamelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _UpperCAmelCase = model(pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase ) _UpperCAmelCase = model(_UpperCamelCase ) comm_check_on_output(_UpperCamelCase ) _UpperCAmelCase = model( pixel_values=_UpperCamelCase , pixel_mask=_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ) comm_check_on_output(_UpperCamelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __UpperCamelCase ( A__ , A__ , unittest.TestCase ): __A : int = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __A : int = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __A : List[str] = False __A : Optional[Any] = False __A : Union[str, Any] = False __A : Optional[Any] = False def UpperCamelCase( self ): 
_UpperCAmelCase = MaskFormerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase ) def UpperCamelCase( self ): self.config_tester.run_common_tests() def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_UpperCamelCase ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def UpperCamelCase( self ): pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def UpperCamelCase( self ): pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def UpperCamelCase( self ): pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def UpperCamelCase( self ): pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def UpperCamelCase( self ): pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCamelCase( self ): pass def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_UpperCamelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _UpperCamelCase ) @slow def UpperCamelCase( self ): for model_name in ["facebook/maskformer-swin-small-coco"]: _UpperCAmelCase = 
MaskFormerModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase = (self.model_tester.min_size,) * 2 _UpperCAmelCase = { '''pixel_values''': torch.randn((2, 3, *size) , device=_UpperCamelCase ), '''mask_labels''': torch.randn((2, 10, *size) , device=_UpperCamelCase ), '''class_labels''': torch.zeros(2 , 10 , device=_UpperCamelCase ).long(), } _UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_UpperCamelCase ) _UpperCAmelCase = model(**_UpperCamelCase ) self.assertTrue(outputs.loss is not None ) def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(_UpperCamelCase , **_UpperCamelCase , output_hidden_states=_UpperCamelCase ) def UpperCamelCase( self ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(_UpperCamelCase ).to(_UpperCamelCase ) _UpperCAmelCase = model(**_UpperCamelCase , output_attentions=_UpperCamelCase ) self.assertTrue(outputs.attentions is not None ) def UpperCamelCase( self ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.train() _UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ).loss loss.backward() def UpperCamelCase( self ): # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() 
_UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(_UpperCamelCase ) model.to(_UpperCamelCase ) model.train() _UpperCAmelCase = model(_UpperCamelCase , mask_labels=_UpperCamelCase , class_labels=_UpperCamelCase ) _UpperCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _UpperCAmelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=_UpperCamelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) UpperCAmelCase_ = 1e-4 def A__ ( ) -> Dict: """simple docstring""" _UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __UpperCamelCase ( unittest.TestCase ): @cached_property def UpperCamelCase( self ): return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def UpperCamelCase( self ): _UpperCAmelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(_UpperCamelCase ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase ) _UpperCAmelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _UpperCAmelCase = model(**_UpperCamelCase ) _UpperCAmelCase = 
torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(_UpperCamelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) _UpperCAmelCase = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(_UpperCamelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) _UpperCAmelCase = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(_UpperCamelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) def UpperCamelCase( self ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(_UpperCamelCase ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase ) _UpperCAmelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _UpperCAmelCase = model(**_UpperCamelCase ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [ [-1.3737124, -1.7724937, -1.9364233], [-1.5977281, -1.9867939, -2.1523695], [-1.5795398, -1.9269832, -2.093942], ] _UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) # class_queries_logits 
_UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [ [1.6512e00, -5.2572e00, -3.3519e00], [3.6169e-02, -5.9025e00, -2.9313e00], [1.0766e-04, -7.7630e00, -5.1263e00], ] ).to(_UpperCamelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) def UpperCamelCase( self ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(_UpperCamelCase ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(_UpperCamelCase , return_tensors='''pt''' ).to(_UpperCamelCase ) _UpperCAmelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(_UpperCamelCase , (1, 3, 800, 1088) ) with torch.no_grad(): _UpperCAmelCase = model(**_UpperCamelCase ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] _UpperCAmelCase = torch.tensor(_UpperCamelCase ).to(_UpperCamelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(_UpperCamelCase ) 
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=_UpperCamelCase ) ) def UpperCamelCase( self ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(_UpperCamelCase ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , ) _UpperCAmelCase = inputs['''pixel_values'''].to(_UpperCamelCase ) _UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''mask_labels''']] _UpperCAmelCase = [el.to(_UpperCamelCase ) for el in inputs['''class_labels''']] with torch.no_grad(): _UpperCAmelCase = model(**_UpperCamelCase ) self.assertTrue(outputs.loss is not None )
32
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase__ = 0 if start < end: lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = a[end] lowerCAmelCase__ = a[pivot] lowerCAmelCase__ = temp lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 ) count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ ) return count def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict: '''simple docstring''' lowerCAmelCase__ = 0 lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = a[end] lowerCAmelCase__ = a[pivot] lowerCAmelCase__ = temp lowerCAmelCase__ = start - 1 for index in range(UpperCamelCase_ , UpperCamelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ = new_pivot_index + 1 lowerCAmelCase__ = a[new_pivot_index] lowerCAmelCase__ = a[index] lowerCAmelCase__ = temp lowerCAmelCase__ = a[new_pivot_index + 1] lowerCAmelCase__ = a[end] lowerCAmelCase__ = temp return new_pivot_index + 1, count UpperCAmelCase__ : Tuple = TemporaryFile() UpperCAmelCase__ : List[str] = 1_00 # 1000 elements are to be sorted UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p) np.save(outfile, X) print("The array is") print(X) outfile.seek(0) # using the same array UpperCAmelCase__ : Optional[Any] = np.load(outfile) UpperCAmelCase__ : Any = len(M) - 1 UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution" "is :" ) print(z)
48
0
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __magic_name__ (snake_case_ ,unittest.TestCase ): '''simple docstring''' __lowercase : Optional[Any] = OpenAIGPTTokenizer __lowercase : int = OpenAIGPTTokenizerFast __lowercase : str = True __lowercase : Dict = False def SCREAMING_SNAKE_CASE__ ( self:List[str] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case__ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] snake_case__ = dict(zip(_a , range(len(_a ) ) ) ) snake_case__ = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', ''''''] snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(_a ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(_a ) ) def SCREAMING_SNAKE_CASE__ ( self:int , _a:Dict ): return "lower newer", "lower newer" def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] ): snake_case__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) snake_case__ = '''lower''' snake_case__ = ['''low''', '''er</w>'''] snake_case__ = tokenizer.tokenize(_a ) self.assertListEqual(_a , _a ) snake_case__ = tokens + ['''<unk>'''] snake_case__ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a ) def SCREAMING_SNAKE_CASE__ ( self:Optional[int] , _a:Dict=15 ): for tokenizer, 
pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): snake_case__ = self.rust_tokenizer_class.from_pretrained(_a , **_a ) # Simple input snake_case__ = '''This is a simple input''' snake_case__ = ['''This is a simple input 1''', '''This is a simple input 2'''] snake_case__ = ('''This is a simple input''', '''This is a pair''') snake_case__ = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' ) # Simple input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' ) # Simple input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , ) # Pair input self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding='''max_length''' ) # Pair input self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding='''max_length''' ) # Pair input self.assertRaises( _a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding='''max_length''' , ) def SCREAMING_SNAKE_CASE__ ( self:Any ): pass @require_ftfy @require_spacy @require_tokenizers class __magic_name__ (snake_case_ ): '''simple docstring''' pass
33
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def A ( UpperCamelCase_ : List[Any] ) -> Tuple: '''simple docstring''' if "img_encoder.pos_embed" in name: lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" ) if "img_encoder.patch_embed.proj" in name: lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" ) if "img_encoder.patch_embed.norm" in name: lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" ) if "img_encoder.layers" in name: lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" ) if "blocks" in name and "res" not in name: lowerCAmelCase__ = name.replace("blocks" , "layers" ) if "attn" in name and "pre_assign" not in name: lowerCAmelCase__ = name.replace("attn" , "self_attn" ) if "proj" in name and "self_attn" in name and "text" not in name: lowerCAmelCase__ = name.replace("proj" , "out_proj" ) if "pre_assign_attn.attn.proj" in name: lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" ) if "norm1" in name: lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" ) if "norm2" in name and "pre_assign" not in name: lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" ) if "img_encoder.norm" in name: lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" ) # text encoder if "text_encoder.token_embedding" in name: lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" ) if "text_encoder.positional_embedding" in name: lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" ) if "text_encoder.transformer.resblocks." 
in name: lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." ) if "ln_1" in name: lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" ) if "ln_2" in name: lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" ) if "c_fc" in name: lowerCAmelCase__ = name.replace("c_fc" , "fc1" ) if "c_proj" in name: lowerCAmelCase__ = name.replace("c_proj" , "fc2" ) if "text_encoder" in name: lowerCAmelCase__ = name.replace("text_encoder" , "text_model" ) if "ln_final" in name: lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" ) # projection layers if "img_projector.linear_hidden." in name: lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." ) if "img_projector.linear_out." in name: lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." ) if "text_projector.linear_hidden" in name: lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" ) if "text_projector.linear_out" in name: lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" ) return name def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCAmelCase__ = key.split("." 
) lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] ) lowerCAmelCase__ = config.vision_config.hidden_size if "weight" in key: lowerCAmelCase__ = val[:dim, :] lowerCAmelCase__ = val[dim : dim * 2, :] lowerCAmelCase__ = val[-dim:, :] else: lowerCAmelCase__ = val[:dim] lowerCAmelCase__ = val[dim : dim * 2] lowerCAmelCase__ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors lowerCAmelCase__ = key.split("." ) lowerCAmelCase__ = int(key_split[3] ) lowerCAmelCase__ = config.text_config.hidden_size if "weight" in key: lowerCAmelCase__ = val[:dim, :] lowerCAmelCase__ = val[ dim : dim * 2, : ] lowerCAmelCase__ = val[-dim:, :] else: lowerCAmelCase__ = val[:dim] lowerCAmelCase__ = val[dim : dim * 2] lowerCAmelCase__ = val[-dim:] else: lowerCAmelCase__ = rename_key(UpperCamelCase_ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): lowerCAmelCase__ = val.squeeze_() else: lowerCAmelCase__ = val return orig_state_dict def A ( ) -> Optional[int]: '''simple docstring''' lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg" lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return im @torch.no_grad() def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any: '''simple docstring''' lowerCAmelCase__ = GroupViTConfig() lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval() lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"] lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ ) assert 
missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0) # verify result lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" ) lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" ) with torch.no_grad(): lowerCAmelCase__ = model(**UpperCamelCase_ ) if model_name == "groupvit-gcc-yfcc": lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] ) elif model_name == "groupvit-gcc-redcaps": lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] ) else: raise ValueError(F"""Model name {model_name} not supported.""" ) assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 ) processor.save_pretrained(UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) print("Successfully saved processor and model to" , UpperCamelCase_ ) if push_to_hub: print("Pushing to the hub..." ) processor.push_to_hub(UpperCamelCase_ , organization="nielsr" ) model.push_to_hub(UpperCamelCase_ , organization="nielsr" ) if __name__ == "__main__": UpperCAmelCase__ : List[str] = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model." ) parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint") parser.add_argument( "--model_name", default="groupvit-gccy-fcc", type=str, help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.", ) UpperCAmelCase__ : Any = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
48
0
"""simple docstring""" import warnings from .generation import TFGenerationMixin class snake_case_ ( lowerCamelCase_ ): """simple docstring""" warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , lowerCamelCase_ , )
34
'''simple docstring''' from __future__ import annotations from functools import lru_cache from math import ceil UpperCAmelCase__ : Optional[Any] = 1_00 UpperCAmelCase__ : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) UpperCAmelCase__ : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=1_00 ) def A ( UpperCamelCase_ : int ) -> set[int]: '''simple docstring''' if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} lowerCAmelCase__ = set() lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A ( UpperCamelCase_ : int = 50_00 ) -> int | None: '''simple docstring''' for number_to_partition in range(1 , UpperCamelCase_ ): if len(partition(UpperCamelCase_ ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F"{solution() = }")
48
0
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class lowercase ( unittest.TestCase ): def __init__( self : Dict , _lowercase : Tuple , _lowercase : Optional[Any]=2 , _lowercase : int=56 , _lowercase : List[str]=True , _lowercase : Tuple=True , _lowercase : Optional[Any]=True , _lowercase : Optional[Any]=True , _lowercase : List[str]=99 , _lowercase : Dict=32 , _lowercase : Dict=2 , _lowercase : str=2 , _lowercase : Union[str, Any]=7 , _lowercase : Union[str, Any]="gelu_new" , _lowercase : List[Any]=0.1 , _lowercase : str=0.1 , _lowercase : Dict=5_12 , _lowercase : List[Any]=16 , _lowercase : Optional[int]=2 , _lowercase : Dict=0.02 , _lowercase : Optional[int]=4 , _lowercase : str="block_sparse" , _lowercase : str=True , _lowercase : List[str]=False , _lowercase : Any=2 , _lowercase : Tuple=3 , ): SCREAMING_SNAKE_CASE__ : Any = parent SCREAMING_SNAKE_CASE__ : int = batch_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = seq_length SCREAMING_SNAKE_CASE__ : Tuple = is_training SCREAMING_SNAKE_CASE__ : Optional[Any] = use_attention_mask SCREAMING_SNAKE_CASE__ : int = use_token_type_ids SCREAMING_SNAKE_CASE__ : Dict = use_labels SCREAMING_SNAKE_CASE__ : List[str] = vocab_size SCREAMING_SNAKE_CASE__ : Any = hidden_size SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Any = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE__ : Union[str, 
Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : List[Any] = type_vocab_size SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range SCREAMING_SNAKE_CASE__ : Optional[Any] = num_choices SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_embeddings SCREAMING_SNAKE_CASE__ : Tuple = attention_type SCREAMING_SNAKE_CASE__ : List[Any] = use_bias SCREAMING_SNAKE_CASE__ : int = block_size SCREAMING_SNAKE_CASE__ : Dict = num_random_blocks def lowercase__ ( self : List[str] ): SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : List[Any] = None if self.use_attention_mask: SCREAMING_SNAKE_CASE__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[int] = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def lowercase__ ( self : Optional[int] ): SCREAMING_SNAKE_CASE__ : 
Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = config_and_inputs SCREAMING_SNAKE_CASE__ : Dict = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask, } return config, inputs_dict @require_flax class lowercase ( _UpperCAmelCase , unittest.TestCase ): lowerCamelCase : str = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) lowerCamelCase : int = False lowerCamelCase : Optional[int] = False def lowercase__ ( self : Tuple ): SCREAMING_SNAKE_CASE__ : Tuple = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase__ ( self : Optional[int] ): super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase__ ( self : Tuple ): super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase__ ( self : Optional[int] ): super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowercase__ ( self : Tuple ): super().test_hidden_states_output() @slow def lowercase__ ( self : Tuple ): for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Tuple = model_class_name.from_pretrained('''google/bigbird-roberta-base''' ) self.assertIsNotNone(_lowercase ) def lowercase__ ( self : Union[str, Any] ): if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def 
lowercase__ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(_lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ : int = model_class(_lowercase ) @jax.jit def model_jitted(_lowercase : Optional[int] , _lowercase : List[Any]=None , **_lowercase : Tuple ): return model(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase ) with self.subTest('''JIT Enabled''' ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_jitted(**_lowercase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): SCREAMING_SNAKE_CASE__ : Optional[Any] = model_jitted(**_lowercase ).to_tuple() self.assertEqual(len(_lowercase ) , len(_lowercase ) ) for jitted_output, output in zip(_lowercase , _lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def lowercase__ ( self : List[Any] , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : str , _lowercase : Tuple=1E-5 , _lowercase : Dict="outputs" , _lowercase : Optional[Any]=None ): # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith('''outputs.attentions''' ): return else: super().check_pt_flax_outputs(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
35
'''simple docstring''' import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ : List[Any] = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = {"vocab_file": "vocab.json"} UpperCAmelCase__ : Optional[Any] = { "vocab_file": { "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json", } } UpperCAmelCase__ : Union[str, Any] = {"mgp-str": 27} class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Any = VOCAB_FILES_NAMES snake_case__ :Dict = PRETRAINED_VOCAB_FILES_MAP snake_case__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int="[GO]" , __magic_name__ : Optional[Any]="[GO]" , __magic_name__ : List[str]="[s]" , __magic_name__ : str="[GO]" , **__magic_name__ : List[Any] ): """simple docstring""" super().__init__( unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , **__magic_name__ , ) with open(__magic_name__ , encoding="utf-8" ) as vocab_handle: lowerCAmelCase__ = json.load(__magic_name__ ) lowerCAmelCase__ = {v: k for k, v in self.vocab.items()} @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return len(self.vocab ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Dict ): """simple docstring""" lowerCAmelCase__ = [] for s in text: char_tokens.extend(__magic_name__ ) return char_tokens def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ): """simple docstring""" return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) ) def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Tuple ): """simple docstring""" return self.decoder.get(__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : str , 
__magic_name__ : str , __magic_name__ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(__magic_name__ ): logger.error("Vocabulary path ({}) should be a directory".format(__magic_name__ ) ) return lowerCAmelCase__ = os.path.join( __magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + "\n" ) return (vocab_file,)
48
0
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch __lowercase : Any = logging.get_logger(__name__) class _A : '''simple docstring''' def __init__( self ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ): '''simple docstring''' if not conversation_id: snake_case : str = uuid.uuida() if past_user_inputs is None: snake_case : Any = [] if generated_responses is None: snake_case : Dict = [] snake_case : uuid.UUID = conversation_id snake_case : List[str] = past_user_inputs snake_case : List[str] = generated_responses snake_case : Optional[str] = text def __eq__( self ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) snake_case : Optional[Any] = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". 
Set `overwrite` to True to overwrite unprocessed user input""" ) else: snake_case : Any = text def snake_case_ ( self ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) snake_case : Tuple = None def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' self.generated_responses.append(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): '''simple docstring''' snake_case : List[str] = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): snake_case : List[Any] = """user""" if is_user else """bot""" output += F"""{name} >> {text} \n""" return output @add_end_docstrings( snake_case , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
''' , ) class _A ( snake_case ): '''simple docstring''' def __init__( self ,*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ): '''simple docstring''' super().__init__(*SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) if self.tokenizer.pad_token_id is None: snake_case : int = self.tokenizer.eos_token def snake_case_ ( self ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,SCREAMING_SNAKE_CASE_=None ,**SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case : Union[str, Any] = {} snake_case : str = {} snake_case : List[Any] = {} if min_length_for_response is not None: snake_case : Optional[int] = min_length_for_response if minimum_tokens is not None: snake_case : Dict = minimum_tokens if "max_length" in generate_kwargs: snake_case : Optional[Any] = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case : Union[str, Any] = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(SCREAMING_SNAKE_CASE_ ) return preprocess_params, forward_params, postprocess_params def __call__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=0 ,**SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case : Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE_ ,num_workers=SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) == 1: return outputs[0] return outputs def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=32 ): '''simple docstring''' if not isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ): raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. 
""" """Add user inputs with the conversation's `add_user_input` method""" ) if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ): snake_case : Optional[Any] = self.tokenizer._build_conversation_input_ids(SCREAMING_SNAKE_CASE_ ) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case : List[str] = self._legacy_parse_and_tokenize(SCREAMING_SNAKE_CASE_ ) if self.framework == "pt": snake_case : Dict = torch.LongTensor([input_ids] ) elif self.framework == "tf": snake_case : Dict = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=10 ,**SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case : Dict = generate_kwargs.get("""max_length""" ,self.model.config.max_length ) snake_case : Any = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) snake_case : List[str] = max_length - minimum_tokens snake_case : List[str] = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: snake_case : Dict = model_inputs["""attention_mask"""][:, -trim:] snake_case : Tuple = model_inputs.pop("""conversation""" ) snake_case : Tuple = max_length snake_case : List[Any] = self.model.generate(**SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ) if self.model.config.is_encoder_decoder: snake_case : Tuple = 1 else: snake_case : List[str] = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=True ): '''simple docstring''' snake_case : Optional[int] = model_outputs["""output_ids"""] snake_case : List[str] = self.tokenizer.decode( output_ids[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE_ ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ,) snake_case : List[str] = 
model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(SCREAMING_SNAKE_CASE_ ) return conversation def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ): '''simple docstring''' snake_case : int = self.tokenizer.eos_token_id snake_case : Tuple = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(SCREAMING_SNAKE_CASE_ ,add_special_tokens=SCREAMING_SNAKE_CASE_ ) ) if len(SCREAMING_SNAKE_CASE_ ) > self.tokenizer.model_max_length: snake_case : Tuple = input_ids[-self.tokenizer.model_max_length :] return input_ids
36
'''simple docstring''' from math import sqrt def A ( UpperCamelCase_ : int ) -> int: '''simple docstring''' lowerCAmelCase__ = 0 for i in range(1 , int(sqrt(UpperCamelCase_ ) + 1 ) ): if n % i == 0 and i != sqrt(UpperCamelCase_ ): total += i + n // i elif i == sqrt(UpperCamelCase_ ): total += i return total - n def A ( UpperCamelCase_ : int = 1_00_00 ) -> int: '''simple docstring''' lowerCAmelCase__ = sum( i for i in range(1 , UpperCamelCase_ ) if sum_of_divisors(sum_of_divisors(UpperCamelCase_ ) ) == i and sum_of_divisors(UpperCamelCase_ ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
48
0
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class A__ : """simple docstring""" def __init__( self : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : int=None , lowerCamelCase__ : int="resnet50" , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : str=32 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : str=True , lowerCamelCase__ : Union[str, Any]=True , ): a__ : Tuple = parent a__ : Any = out_indices if out_indices is not None else [4] a__ : str = stage_names a__ : Dict = out_features a__ : Dict = backbone a__ : Optional[Any] = batch_size a__ : int = image_size a__ : List[Any] = num_channels a__ : List[Any] = use_pretrained_backbone a__ : List[Any] = is_training def _UpperCamelCase( self : Optional[Any] ): a__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a__ : Tuple = self.get_config() return config, pixel_values def _UpperCamelCase( self : Optional[Any] ): return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any ): a__ : Tuple = TimmBackbone(config=lowerCamelCase__ 
) model.to(lowerCamelCase__ ) model.eval() with torch.no_grad(): a__ : Dict = model(lowerCamelCase__ ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def _UpperCamelCase( self : int ): a__ : int = self.prepare_config_and_inputs() a__, a__ : List[str] = config_and_inputs a__ : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class A__ ( A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" _lowercase = (TimmBackbone,) if is_torch_available() else () _lowercase = {'feature-extraction': TimmBackbone} if is_torch_available() else {} _lowercase = False _lowercase = False _lowercase = False _lowercase = False def _UpperCamelCase( self : Tuple ): a__ : str = TimmBackboneModelTester(self ) a__ : List[Any] = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def _UpperCamelCase( self : List[str] ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCamelCase( self : Union[str, Any] ): a__ : Optional[Any] = "resnet18" a__ : List[Any] = "microsoft/resnet-18" a__ : str = AutoBackbone.from_pretrained(lowerCamelCase__ , use_timm_backbone=lowerCamelCase__ ) a__ : List[str] = AutoBackbone.from_pretrained(lowerCamelCase__ ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. 
For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) a__ : List[Any] = AutoBackbone.from_pretrained(lowerCamelCase__ , use_timm_backbone=lowerCamelCase__ , out_indices=[1, 2, 3] ) a__ : int = AutoBackbone.from_pretrained(lowerCamelCase__ , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip("TimmBackbone doesn't support feed forward chunking" ) def _UpperCamelCase( self : Any ): pass @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" ) def _UpperCamelCase( self : int ): pass @unittest.skip("TimmBackbone initialization is managed on the timm side" ) def _UpperCamelCase( self : Union[str, Any] ): pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def _UpperCamelCase( self : int ): pass @unittest.skip("TimmBackbone models doesn't have inputs_embeds" ) def _UpperCamelCase( self : Optional[int] ): pass @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" ) def _UpperCamelCase( self : Optional[int] ): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def _UpperCamelCase( self : int ): pass @unittest.skip("model weights aren't tied in TimmBackbone." ) def _UpperCamelCase( self : Optional[Any] ): pass @unittest.skip("model weights aren't tied in TimmBackbone." 
) def _UpperCamelCase( self : List[Any] ): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def _UpperCamelCase( self : Optional[int] ): pass @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" ) def _UpperCamelCase( self : int ): pass @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." ) def _UpperCamelCase( self : List[str] ): pass @unittest.skip("TimmBackbone doesn't support output_attentions." ) def _UpperCamelCase( self : Tuple ): pass @unittest.skip("Safetensors is not supported by timm." ) def _UpperCamelCase( self : Union[str, Any] ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def _UpperCamelCase( self : Any ): pass def _UpperCamelCase( self : Union[str, Any] ): a__, a__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Tuple = model_class(lowerCamelCase__ ) a__ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : List[Any] = [*signature.parameters.keys()] a__ : Optional[int] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def _UpperCamelCase( self : Dict ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() a__ : List[Any] = True a__ : List[str] = self.has_attentions # no need to test all models as different heads yield the same functionality a__ : Union[str, Any] = self.all_model_classes[0] a__ : Dict = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) a__ : int = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) a__ : Dict = model(**lowerCamelCase__ ) a__ : int = outputs[0][-1] # Encoder-/Decoder-only models a__ : List[str] = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: a__ : str = outputs.attentions[0] attentions.retain_grad() 
output.flatten()[0].backward(retain_graph=lowerCamelCase__ ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def _UpperCamelCase( self : Tuple ): a__, a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Tuple = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : List[str] = model(**lowerCamelCase__ ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None a__ : Tuple = copy.deepcopy(lowerCamelCase__ ) a__ : Any = None a__ : Optional[Any] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[int] = model(**lowerCamelCase__ ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights a__ : Any = copy.deepcopy(lowerCamelCase__ ) a__ : List[str] = False a__ : Union[str, Any] = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() a__ : Optional[int] = model(**lowerCamelCase__ )
37
'''simple docstring''' import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray: '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' lowerCAmelCase__ = np.nan for i in range(UpperCamelCase_ ): lowerCAmelCase__ = features[:, labels == i] lowerCAmelCase__ = data.mean(1 ) # Centralize the data of class i lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(UpperCamelCase_ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) return covariance_sum / features.shape[1] def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' lowerCAmelCase__ = features.mean(1 ) lowerCAmelCase__ = np.nan for i in range(UpperCamelCase_ ): lowerCAmelCase__ = features[:, labels == i] lowerCAmelCase__ = data.shape[1] lowerCAmelCase__ = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) lowerCAmelCase__ = device_data * np.dot( column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , ) return covariance_sum / features.shape[1] def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' if features.any(): lowerCAmelCase__ = features.mean(1 ) # Center the dataset lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) ) lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1] lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ ) # Take all the columns in the reverse order (-1), and then takes only the first lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ ) logging.info("Principal Component Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ ) logging.error("Dataset empty" ) raise AssertionError def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray: '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: lowerCAmelCase__ ,lowerCAmelCase__ = eigh( covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , ) lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions] lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ ) lowerCAmelCase__ = svd_matrix[:, 0:dimensions] lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ ) logging.info("Linear Discriminant Analysis computed" ) return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , 
force=UpperCamelCase_ ) logging.error("Dataset empty" ) raise AssertionError def A ( ) -> None: '''simple docstring''' lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] ) lowerCAmelCase__ = 2 lowerCAmelCase__ = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(UpperCamelCase_ ) as error_info: lowerCAmelCase__ = linear_discriminant_analysis( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if isinstance(UpperCamelCase_ , np.ndarray ): raise AssertionError( "Did not raise AssertionError for dimensions > classes" ) assert error_info.type is AssertionError def A ( ) -> None: '''simple docstring''' lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowerCAmelCase__ = 2 lowerCAmelCase__ = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] ) with pytest.raises(UpperCamelCase_ ) as error_info: lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ ) if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
48
0
'''simple docstring''' import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( """kwargs, expected""" , [ ({"""num_shards""": 0, """max_num_jobs""": 1}, []), ({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]), ({"""num_shards""": 10, """max_num_jobs""": 10}, [range(__magic_name__ , i + 1 ) for i in range(10 )]), ({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]), ({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def UpperCamelCase__ ( __magic_name__ : Dict , __magic_name__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' snake_case__ : List[str] = _distribute_shards(**__magic_name__ ) assert out == expected @pytest.mark.parametrize( """gen_kwargs, max_num_jobs, expected""" , [ ({"""foo""": 0}, 10, [{"""foo""": 0}]), ({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]), ({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]), ({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]), ({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]), ] , ) def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : List[Any] ) -> int: '''simple docstring''' snake_case__ : Dict = _split_gen_kwargs(__magic_name__ , __magic_name__ ) assert out == expected @pytest.mark.parametrize( """gen_kwargs, expected""" , [ ({"""foo""": 0}, 1), ({"""shards""": [0]}, 1), ({"""shards""": [0, 1, 2, 3]}, 4), ({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4), ({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4), ({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError), ] , ) def UpperCamelCase__ ( __magic_name__ : List[str] , __magic_name__ : Tuple ) -> 
List[str]: '''simple docstring''' if expected is RuntimeError: with pytest.raises(__magic_name__ ): _number_of_shards_in_gen_kwargs(__magic_name__ ) else: snake_case__ : Any = _number_of_shards_in_gen_kwargs(__magic_name__ ) assert out == expected
38
'''simple docstring''' def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> list: '''simple docstring''' lowerCAmelCase__ = word.split() def justify(UpperCamelCase_ : list , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> str: lowerCAmelCase__ = max_width - width lowerCAmelCase__ = len(UpperCamelCase_ ) if len(UpperCamelCase_ ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: lowerCAmelCase__ = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] lowerCAmelCase__ = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] lowerCAmelCase__ = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(UpperCamelCase_ ): num_spaces_between_words_list[i] += 1 lowerCAmelCase__ = [] for i in range(UpperCamelCase_ ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * " " ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(UpperCamelCase_ ) lowerCAmelCase__ = [] lowerCAmelCase__ = [] lowerCAmelCase__ = 0 for word in words: if width + len(UpperCamelCase_ ) + len(UpperCamelCase_ ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(UpperCamelCase_ ) width += len(UpperCamelCase_ ) else: # justify the line and add it to result answer.append(justify(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ) # reset new line and new width lowerCAmelCase__ ,lowerCAmelCase__ = [word], len(UpperCamelCase_ ) 
lowerCAmelCase__ = max_width - width - len(UpperCamelCase_ ) answer.append(" ".join(UpperCamelCase_ ) + (remaining_spaces + 1) * " " ) return answer if __name__ == "__main__": from doctest import testmod testmod()
48
0
import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging lowerCAmelCase_ = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt'''] lowerCAmelCase_ = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse('''0.9.0'''): raise Exception('''requires fairseq >= 0.9.0''') logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ''' Hello world! cécé herlolip''' lowerCAmelCase_ = [ ('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''), ('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''), ('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''), ('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''), ] def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = dct.pop(SCREAMING_SNAKE_CASE__ ) snake_case_ = val def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' ) snake_case_ = torch.hub.load('''pytorch/fairseq''' , '''bart.large.cnn''' ).eval() hub_interface.model.load_state_dict(sd['''model'''] ) return hub_interface def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_, snake_case_ = emb.weight.shape 
snake_case_ = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) snake_case_ = emb.weight.data return lin_layer @torch.no_grad() def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): if not os.path.exists(SCREAMING_SNAKE_CASE__ ): snake_case_ = torch.hub.load('''pytorch/fairseq''' , SCREAMING_SNAKE_CASE__ ).eval() else: snake_case_ = load_xsum_checkpoint(SCREAMING_SNAKE_CASE__ ) bart.model.upgrade_state_dict(bart.model.state_dict() ) if hf_checkpoint_name is None: snake_case_ = checkpoint_path.replace('''.''' , '''-''' ) snake_case_ = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) snake_case_ = bart.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ) snake_case_ = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).encode(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).unsqueeze(0 ) if not torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).all(): raise ValueError( F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' ) if checkpoint_path == "bart.large.mnli": snake_case_ = bart.state_dict() remove_ignore_keys_(SCREAMING_SNAKE_CASE__ ) snake_case_ = state_dict['''model.decoder.embed_tokens.weight'''] for src, dest in mnli_rename_keys: rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ = BartForSequenceClassification(SCREAMING_SNAKE_CASE__ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) snake_case_ = bart.predict('''mnli''' , SCREAMING_SNAKE_CASE__ , return_logits=SCREAMING_SNAKE_CASE__ ) snake_case_ = model(SCREAMING_SNAKE_CASE__ )[0] # logits else: # no classification heads to worry about snake_case_ = bart.model.state_dict() remove_ignore_keys_(SCREAMING_SNAKE_CASE__ ) snake_case_ = state_dict['''decoder.embed_tokens.weight'''] snake_case_ = bart.extract_features(SCREAMING_SNAKE_CASE__ ) if hf_checkpoint_name == "facebook/bart-large": snake_case_ = 
BartModel(SCREAMING_SNAKE_CASE__ ).eval() model.load_state_dict(SCREAMING_SNAKE_CASE__ ) snake_case_ = model(SCREAMING_SNAKE_CASE__ ).model[0] else: snake_case_ = BartForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() # an existing summarization ckpt model.model.load_state_dict(SCREAMING_SNAKE_CASE__ ) if hasattr(SCREAMING_SNAKE_CASE__ , '''lm_head''' ): snake_case_ = make_linear_from_emb(model.model.shared ) snake_case_ = model.model(SCREAMING_SNAKE_CASE__ )[0] # Check results if fairseq_output.shape != new_model_outputs.shape: raise ValueError( F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' ) if (fairseq_output != new_model_outputs).any().item(): raise ValueError('''Some values in `fairseq_output` are different from `new_model_outputs`''' ) Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum''' ) lowerCAmelCase_ = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
39
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 UpperCAmelCase__ : str = sys.version_info >= (3, 10) def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]: '''simple docstring''' return field(default_factory=lambda: default , metadata=UpperCamelCase_ ) @dataclass class A : snake_case__ :int snake_case__ :float snake_case__ :str snake_case__ :bool @dataclass class A : snake_case__ :int = 42 snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} ) @dataclass class A : snake_case__ :bool = False snake_case__ :bool = True snake_case__ :Optional[bool] = None class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Any = 'titi' snake_case__ :Optional[int] = 'toto' class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Union[str, Any] = 'titi' snake_case__ :str = 'toto' snake_case__ :int = 42 @dataclass class A : snake_case__ :BasicEnum = "toto" def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = BasicEnum(self.foo ) @dataclass class A : snake_case__ :MixedTypeEnum = "toto" def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = MixedTypeEnum(self.foo ) @dataclass class A : snake_case__ :Optional[int] = None snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} ) snake_case__ :Optional[str] = None snake_case__ :Optional[List[str]] = list_field(default=[] ) snake_case__ :Optional[List[int]] = list_field(default=[] ) 
@dataclass class A : snake_case__ :List[int] = list_field(default=[] ) snake_case__ :List[int] = list_field(default=[1, 2, 3] ) snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class A : snake_case__ :List[int] = field() snake_case__ :str = field() snake_case__ :BasicEnum = field() def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = BasicEnum(self.required_enum ) @dataclass class A : snake_case__ :int snake_case__ :"BasicEnum" = field() snake_case__ :"Optional[bool]" = None snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} ) snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class A : snake_case__ :bool = False snake_case__ :bool = True snake_case__ :bool | None = None @dataclass class A : snake_case__ :int | None = None snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} ) snake_case__ :str | None = None snake_case__ :list[str] | None = list_field(default=[] ) snake_case__ :list[int] | None = list_field(default=[] ) class A ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"} lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) ) 
del xx["type"], yy["type"] self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"] ((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ ) self.assertFalse(example.flag ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , default=42 , type=__magic_name__ ) expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" ) expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" ) expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ ) lowerCAmelCase__ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__magic_name__ ) for dataclass_type in dataclass_types: lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo 
, MixedTypeEnum.toto ) lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowerCAmelCase__ = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" @dataclass class A : snake_case__ :Literal["titi", "toto", 42] = "toto" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument( "--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(args.foo , "toto" ) lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] ) self.assertEqual(args.foo , "titi" ) lowerCAmelCase__ = parser.parse_args(["--foo", "42"] ) self.assertEqual(args.foo , 42 ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ ) expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ ) expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual( __magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , ) lowerCAmelCase__ = parser.parse_args("--foo_int 1 
--bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() ) self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ ) expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" ) expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ ) expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ ) expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ ) lowerCAmelCase__ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__magic_name__ ) for dataclass_type in dataclass_types: lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_args([] ) self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) ) lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() ) self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" 
lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = argparse.ArgumentParser() expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ ) expected.add_argument( "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , ) expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ ) expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" ) expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ ) self.argparsersEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, "extra": 42, } self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" ) os.mkdir(__magic_name__ ) with open(temp_local_path + ".json" , "w+" ) as f: json.dump(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self 
: str ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) lowerCAmelCase__ = { "foo": 12, "bar": 3.14, "baz": "42", "flag": True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" ) os.mkdir(__magic_name__ ) with open(temp_local_path + ".yaml" , "w+" ) as f: yaml.dump(__magic_name__ , __magic_name__ ) lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0] lowerCAmelCase__ = BasicExample(**__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser(__magic_name__ ) self.assertIsNotNone(__magic_name__ )
48
0
def UpperCamelCase ( snake_case__ : Optional[int] ) -> str: UpperCamelCase : List[str] = [0] * len(snake_case__ ) UpperCamelCase : int = [] UpperCamelCase : Optional[int] = [1] * len(snake_case__ ) for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(snake_case__ ) ): if indegree[i] == 0: queue.append(snake_case__ ) while queue: UpperCamelCase : Optional[int] = queue.pop(0 ) for x in graph[vertex]: indegree[x] -= 1 if long_dist[vertex] + 1 > long_dist[x]: UpperCamelCase : Tuple = long_dist[vertex] + 1 if indegree[x] == 0: queue.append(snake_case__ ) print(max(snake_case__ ) ) # Adjacency list of Graph __UpperCAmelCase = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []} longest_distance(graph)
40
'''simple docstring''' import sys from collections import defaultdict class A : def __init__( self : Any ): """simple docstring""" lowerCAmelCase__ = [] def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[Any] ): """simple docstring""" return self.node_position[vertex] def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : List[str] ): """simple docstring""" lowerCAmelCase__ = pos def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] ): """simple docstring""" if start > size // 2 - 1: return else: if 2 * start + 2 >= size: lowerCAmelCase__ = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: lowerCAmelCase__ = 2 * start + 1 else: lowerCAmelCase__ = 2 * start + 2 if heap[smallest_child] < heap[start]: lowerCAmelCase__ ,lowerCAmelCase__ = heap[smallest_child], positions[smallest_child] lowerCAmelCase__ ,lowerCAmelCase__ = ( heap[start], positions[start], ) lowerCAmelCase__ ,lowerCAmelCase__ = temp, tempa lowerCAmelCase__ = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , __magic_name__ ) self.top_to_bottom(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] ): """simple docstring""" lowerCAmelCase__ = position[index] while index != 0: lowerCAmelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: lowerCAmelCase__ = heap[parent] lowerCAmelCase__ = position[parent] self.set_position(position[parent] , __magic_name__ ) else: lowerCAmelCase__ = val lowerCAmelCase__ = temp self.set_position(__magic_name__ , __magic_name__ ) break lowerCAmelCase__ = parent else: 
lowerCAmelCase__ = val lowerCAmelCase__ = temp self.set_position(__magic_name__ , 0 ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ): """simple docstring""" lowerCAmelCase__ = len(__magic_name__ ) // 2 - 1 for i in range(__magic_name__ , -1 , -1 ): self.top_to_bottom(__magic_name__ , __magic_name__ , len(__magic_name__ ) , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ): """simple docstring""" lowerCAmelCase__ = positions[0] lowerCAmelCase__ = sys.maxsize self.top_to_bottom(__magic_name__ , 0 , len(__magic_name__ ) , __magic_name__ ) return temp def A ( UpperCamelCase_ : List[Any] ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase__ = Heap() lowerCAmelCase__ = [0] * len(UpperCamelCase_ ) lowerCAmelCase__ = [-1] * len(UpperCamelCase_ ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph lowerCAmelCase__ = [] # Heap of Distance of vertices from their neighboring vertex lowerCAmelCase__ = [] for vertex in range(len(UpperCamelCase_ ) ): distance_tv.append(sys.maxsize ) positions.append(UpperCamelCase_ ) heap.node_position.append(UpperCamelCase_ ) lowerCAmelCase__ = [] lowerCAmelCase__ = 1 lowerCAmelCase__ = sys.maxsize for neighbor, distance in adjacency_list[0]: lowerCAmelCase__ = 0 lowerCAmelCase__ = distance heap.heapify(UpperCamelCase_ , UpperCamelCase_ ) for _ in range(1 , len(UpperCamelCase_ ) ): lowerCAmelCase__ = heap.delete_minimum(UpperCamelCase_ , UpperCamelCase_ ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) lowerCAmelCase__ = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(UpperCamelCase_ )] ): lowerCAmelCase__ = distance heap.bottom_to_top( UpperCamelCase_ , heap.get_position(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ ) 
lowerCAmelCase__ = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCAmelCase__ : Optional[int] = int(input("Enter number of edges: ").strip()) UpperCAmelCase__ : str = defaultdict(list) for _ in range(edges_number): UpperCAmelCase__ : int = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
48
0
'''simple docstring''' from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
41
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model") if is_sentencepiece_available(): import sentencepiece as sp UpperCAmelCase__ : Tuple = 5 UpperCAmelCase__ : List[Any] = 10 @require_sentencepiece @require_tokenizers class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :Tuple = SpeechaTextTokenizer snake_case__ :Dict = False snake_case__ :Optional[int] = True def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" super().setUp() lowerCAmelCase__ = sp.SentencePieceProcessor() spm_model.Load(__magic_name__ ) lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )] lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) lowerCAmelCase__ = Path(self.tmpdirname ) save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = "<pad>" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): 
"""simple docstring""" lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(__magic_name__ ) , 1001 ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1001 ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = tokenizer.tokenize("This is a test" ) self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , ) lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 
797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , ) @require_sentencepiece class A ( unittest.TestCase ): snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium' snake_case__ :Tuple 
= 'C\'est trop cool' snake_case__ :List[str] = 'Esto es genial' @classmethod def __SCREAMING_SNAKE_CASE ( cls : List[Any] ): """simple docstring""" lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 ) def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size , 10000 ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" self.assertIn(__magic_name__ , self.tokenizer.all_special_ids ) lowerCAmelCase__ = [ES_CODE, 4, 1601, 47, 7647, 2] lowerCAmelCase__ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertNotIn(self.tokenizer.eos_token , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = "fr" lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , __magic_name__ ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = "fr" self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) lowerCAmelCase__ = "es" self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
48
0
'''simple docstring''' import argparse import datetime def _UpperCamelCase ( __UpperCamelCase ) -> str: lowerCamelCase_ = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } lowerCamelCase_ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(__UpperCamelCase ) < 11: raise ValueError('Must be 10 characters long' ) # Get month lowerCamelCase_ = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) lowerCamelCase_ = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day lowerCamelCase_ = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator lowerCamelCase_ = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year lowerCamelCase_ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 85_00: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation lowerCamelCase_ = datetime.date(int(__UpperCamelCase ) ,int(__UpperCamelCase ) ,int(__UpperCamelCase ) ) # Start math if m <= 2: lowerCamelCase_ = y - 1 lowerCamelCase_ = m + 12 # maths var lowerCamelCase_ = int(str(__UpperCamelCase )[:2] ) lowerCamelCase_ = int(str(__UpperCamelCase )[2:] ) lowerCamelCase_ = int(2.6 * m - 5.39 ) lowerCamelCase_ = int(c / 4 ) lowerCamelCase_ = int(k / 4 ) lowerCamelCase_ = int(d + k ) lowerCamelCase_ = int(t + u + v + x ) lowerCamelCase_ = int(z - (2 * c) ) lowerCamelCase_ = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' 
) # Response lowerCamelCase_ = f'''Your date {date_input}, is a {days[str(__UpperCamelCase )]}!''' return response if __name__ == "__main__": import doctest doctest.testmod() A_ = argparse.ArgumentParser( description=( "Find out what day of the week nearly any date is or was. Enter " "date as a string in the mm-dd-yyyy or mm/dd/yyyy format" ) ) parser.add_argument( "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)" ) A_ = parser.parse_args() zeller(args.date_input)
42
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCAmelCase__ : Tuple = logging.get_logger(__name__) # General docstring UpperCAmelCase__ : int = "RegNetConfig" # Base docstring UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040" UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7] # Image classification docstring UpperCAmelCase__ : Tuple = "facebook/regnet-y-040" UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat" UpperCAmelCase__ : int = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class A ( tf.keras.layers.Layer ): def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ): """simple docstring""" super().__init__(**__magic_name__ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) lowerCAmelCase__ = tf.keras.layers.ConvaD( filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , ) lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) lowerCAmelCase__ = ACTaFN[activation] if 
activation is not None else tf.identity def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) ) lowerCAmelCase__ = self.normalization(__magic_name__ ) lowerCAmelCase__ = self.activation(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = config.num_channels lowerCAmelCase__ = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ): """simple docstring""" lowerCAmelCase__ = shape_list(__magic_name__ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) ) lowerCAmelCase__ = self.embedder(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = tf.keras.layers.ConvaD( filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" ) lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ): """simple docstring""" return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ ) class A ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" ) lowerCAmelCase__ = [ tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.pooler(__magic_name__ ) for layer_module in self.attention: lowerCAmelCase__ = layer_module(__magic_name__ ) lowerCAmelCase__ = hidden_state * pooled return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ): """simple docstring""" super().__init__(**__magic_name__ ) 
lowerCAmelCase__ = in_channels != out_channels or stride != 1 lowerCAmelCase__ = max(1 , out_channels // config.groups_width ) lowerCAmelCase__ = ( TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. lowerCAmelCase__ = [ TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( __magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ), ] lowerCAmelCase__ = ACTaFN[config.hidden_act] def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ): """simple docstring""" lowerCAmelCase__ = hidden_state for layer_module in self.layers: lowerCAmelCase__ = layer_module(__magic_name__ ) lowerCAmelCase__ = self.shortcut(__magic_name__ ) hidden_state += residual lowerCAmelCase__ = self.activation(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = in_channels != out_channels or stride != 1 lowerCAmelCase__ = max(1 , out_channels // config.groups_width ) lowerCAmelCase__ = ( TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) lowerCAmelCase__ = [ TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( __magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(__magic_name__ , 
reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ), ] lowerCAmelCase__ = ACTaFN[config.hidden_act] def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ): """simple docstring""" lowerCAmelCase__ = hidden_state for layer_module in self.layers: lowerCAmelCase__ = layer_module(__magic_name__ ) lowerCAmelCase__ = self.shortcut(__magic_name__ ) hidden_state += residual lowerCAmelCase__ = self.activation(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer lowerCAmelCase__ = [ # downsampling is done in the first layer with stride of 2 layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ), *[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ): """simple docstring""" for layer_module in self.layers: lowerCAmelCase__ = layer_module(__magic_name__ ) return hidden_state class A ( tf.keras.layers.Layer ): def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) lowerCAmelCase__ = 
zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) ) def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ): """simple docstring""" lowerCAmelCase__ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowerCAmelCase__ = hidden_states + (hidden_state,) lowerCAmelCase__ = stage_module(__magic_name__ ) if output_hidden_states: lowerCAmelCase__ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ ) @keras_serializable class A ( tf.keras.layers.Layer ): snake_case__ :List[Any] = RegNetConfig def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = config lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" ) lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" ) lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" ) @unpack_inputs def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ): """simple docstring""" lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ ) lowerCAmelCase__ = 
self.encoder( __magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ ) lowerCAmelCase__ = encoder_outputs[0] lowerCAmelCase__ = self.pooler(__magic_name__ ) # Change to NCHW output format have uniformity in the modules lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :str = RegNetConfig snake_case__ :Optional[Any] = 'regnet' snake_case__ :Tuple = 'pixel_values' @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )} UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n" UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. 
Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ ): def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ): """simple docstring""" super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ ) lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(__magic_name__ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ): """simple docstring""" lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.regnet( pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head 
on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , ) class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ): """simple docstring""" super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ ) lowerCAmelCase__ = config.num_labels lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" ) # classification head lowerCAmelCase__ = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__magic_name__ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ): """simple docstring""" lowerCAmelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase__ = self.regnet( __magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ ) lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1] lowerCAmelCase__ = self.classifier[0](__magic_name__ ) lowerCAmelCase__ = self.classifier[1](__magic_name__ ) lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ ) if not return_dict: lowerCAmelCase__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ 
, hidden_states=outputs.hidden_states )
48
0
"""Convert a DALL-E image-codebook checkpoint into the HF FlavaImageCodebook format."""
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    """Replace the last *occurrence* occurrences of *old* in *s* with *new*.

    Implemented via ``str.rsplit`` so only the right-most matches are touched.
    """
    parts = s.rsplit(old, occurrence)
    return new.join(parts)


def count_parameters(state_dict):
    """Sum every parameter tensor in *state_dict* as a cheap checksum.

    `encoder.embeddings` entries are skipped — they are not compared between
    the source and converted models.
    """
    return sum(
        param.float().sum() if 'encoder.embeddings' not in key else 0
        for key, param in state_dict.items()
    )


def upgrade_state_dict(state_dict):
    """Rename DALL-E checkpoint keys to the FLAVA naming scheme.

    - ``group_N.`` prefixes gain a ``.group`` segment,
    - ``res_path.`` gains a ``.path`` segment,
    - trailing ``.w`` / ``.b`` become ``.weight`` / ``.bias``,
    and every tensor is cast to float32.
    """
    upgrade = {}
    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        # NOTE: the renames below must update `key` in place (the original
        # mangled version assigned them to throwaway locals, so no rename
        # ever took effect and `rreplace` was an undefined name).
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.', f'{group_key}.group.')
        if "res_path" in key:
            key = key.replace('res_path.', 'res_path.path.')
        if key.endswith('.w'):
            key = rreplace(key, '.w', '.weight', 1)
        if key.endswith('.b'):
            key = rreplace(key, '.b', '.bias', 1)
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Load a DALL-E encoder checkpoint, convert it, and optionally save it.

    Args:
        checkpoint_path: Local path or URL of the DALL-E encoder checkpoint.
        pytorch_dump_folder_path: Where to save the converted HF model.
        config_path: Optional path to an HF config; defaults to a fresh
            ``FlavaImageCodebookConfig``.
        save_checkpoint: If True save to disk, otherwise return the converted
            state dict.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    # The checkpoint may be a pickled Encoder module or a bare state dict.
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    # Sanity check: the converted model must hold the same parameter mass.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
43
'''simple docstring''' from datetime import datetime import matplotlib.pyplot as plt import torch def A ( UpperCamelCase_ : Tuple ) -> int: '''simple docstring''' for param in module.parameters(): lowerCAmelCase__ = False def A ( ) -> Tuple: '''simple docstring''' lowerCAmelCase__ = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowerCAmelCase__ = "mps" if device == "mps": print( "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch" " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues" " with generations." ) return device def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase__ = plt.imshow(UpperCamelCase_ ) fig.axes.get_xaxis().set_visible(UpperCamelCase_ ) fig.axes.get_yaxis().set_visible(UpperCamelCase_ ) plt.show() def A ( ) -> Union[str, Any]: '''simple docstring''' lowerCAmelCase__ = datetime.now() lowerCAmelCase__ = current_time.strftime("%H:%M:%S" ) return timestamp
48
0
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def A_(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Emit deprecation warnings and pop/collect the deprecated values.

    Each positional argument is a ``(attribute, version_name, message)``
    tuple (a single bare triple is also accepted). For every triple:

    - if the library version has already reached ``version_name``, raise
      ``ValueError`` (the deprecation shim itself should have been removed);
    - otherwise pull ``attribute`` out of ``take_from`` (a kwargs dict or an
      object) and emit a deprecation warning.

    After processing, a non-empty ``take_from`` dict means the caller was
    given a keyword argument that is not deprecated at all → ``TypeError``.

    Returns None, a single collected value, or a tuple of collected values.

    NOTE(review): the previous signature declared all four parameters with
    the same mangled name (a SyntaxError) and the body's local bindings were
    similarly mangled; the keyword names `take_from`/`standard_warn` are
    restored from the references the body itself makes — confirm `stacklevel`
    against upstream.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow a single bare (attribute, version, message) triple.
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            # NOTE(review): the warning category was lost in the mangled
            # source; FutureWarning matches the stated deprecation intent —
            # confirm against upstream.
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # Anything left in the kwargs dict was not a known deprecated argument.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        # `filename` was previously computed but unused and the message held
        # a literal "(unknown)" placeholder — restore the real location.
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
44
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase__ : List[Any] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Union[str, Any] = ["EncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[int] = ["TFEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[Any] = ["FlaxEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
48
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCamelCase = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["SpeechEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["FlaxSpeechEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
45
'''simple docstring''' import argparse from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird from transformers.utils import logging logging.set_verbosity_info() def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : int ) -> Any: '''simple docstring''' lowerCAmelCase__ = BigBirdConfig.from_json_file(UpperCamelCase_ ) print(F"""Building PyTorch model from configuration: {config}""" ) if is_trivia_qa: lowerCAmelCase__ = BigBirdForQuestionAnswering(UpperCamelCase_ ) else: lowerCAmelCase__ = BigBirdForPreTraining(UpperCamelCase_ ) # Load weights from tf checkpoint load_tf_weights_in_big_bird(UpperCamelCase_ , UpperCamelCase_ , is_trivia_qa=UpperCamelCase_ ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--big_bird_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head." ) UpperCAmelCase__ : int = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa )
48
0
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' assert isinstance(_lowerCamelCase , _lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> int: '''simple docstring''' _lowerCamelCase : Tuple = tmp_path / "cache" _lowerCamelCase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowerCamelCase : Dict = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read() _check_parquet_dataset(_lowerCamelCase , _lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' _lowerCamelCase : Optional[Any] = tmp_path / "cache" _lowerCamelCase : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowerCamelCase : Optional[Any] = features.copy() if features else default_expected_features 
_lowerCamelCase : Optional[Any] = ( Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _lowerCamelCase : Any = ParquetDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read() _check_parquet_dataset(_lowerCamelCase , _lowerCamelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str: '''simple docstring''' _lowerCamelCase : Any = tmp_path / "cache" _lowerCamelCase : Dict = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowerCamelCase : List[str] = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase ).read() _check_parquet_dataset(_lowerCamelCase , _lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]: '''simple docstring''' if issubclass(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Tuple = parquet_path elif issubclass(_lowerCamelCase , _lowerCamelCase ): _lowerCamelCase : Any = [parquet_path] _lowerCamelCase : Any = tmp_path / "cache" _lowerCamelCase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowerCamelCase : Union[str, Any] = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read() _check_parquet_dataset(_lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=("train",) ) -> Dict: '''simple docstring''' assert isinstance(_lowerCamelCase , _lowerCamelCase ) for split in splits: _lowerCamelCase : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert 
dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : Any = tmp_path / "cache" _lowerCamelCase : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowerCamelCase : Any = ParquetDatasetReader( {"train": parquet_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read() _check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]: '''simple docstring''' _lowerCamelCase : int = tmp_path / "cache" _lowerCamelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowerCamelCase : Union[str, Any] = features.copy() if features else default_expected_features _lowerCamelCase : str = ( Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _lowerCamelCase : str = ParquetDatasetReader({"train": parquet_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read() _check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict: '''simple docstring''' if split: _lowerCamelCase : Union[str, Any] = {split: parquet_path} else: _lowerCamelCase : Optional[Any] = "train" _lowerCamelCase : Optional[Any] = {"train": parquet_path, "test": 
parquet_path} _lowerCamelCase : Optional[int] = tmp_path / "cache" _lowerCamelCase : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowerCamelCase : Tuple = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read() _check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Dict = ParquetDatasetWriter(_lowerCamelCase , tmp_path / "foo.parquet" ) assert writer.write() > 0 _lowerCamelCase : Tuple = pq.ParquetFile(tmp_path / "foo.parquet" ) _lowerCamelCase : List[Any] = pf.read() assert dataset.data.table == output_table def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : Any = str(shared_datadir / "test_image_rgb.jpg" ) _lowerCamelCase : Optional[Any] = {"image": [image_path]} _lowerCamelCase : List[str] = Features({"image": Image()} ) _lowerCamelCase : List[str] = Dataset.from_dict(_lowerCamelCase , features=_lowerCamelCase ) _lowerCamelCase : List[Any] = ParquetDatasetWriter(_lowerCamelCase , tmp_path / "foo.parquet" ) assert writer.write() > 0 _lowerCamelCase : List[Any] = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) ) assert dataset.features == reloaded_dataset.features _lowerCamelCase : List[str] = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=_lowerCamelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( "feature, expected" , [ (Features({"foo": Value("int32" )} ), None), (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Any: '''simple docstring''' assert 
get_writer_batch_size(_lowerCamelCase ) == expected
46
'''simple docstring''' from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class A : def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ): """simple docstring""" lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = seq_length lowerCAmelCase__ = is_training lowerCAmelCase__ = use_input_mask lowerCAmelCase__ = use_token_type_ids lowerCAmelCase__ = use_labels lowerCAmelCase__ = vocab_size lowerCAmelCase__ = block_sizes lowerCAmelCase__ = num_decoder_layers lowerCAmelCase__ = d_model lowerCAmelCase__ = n_head lowerCAmelCase__ = d_head lowerCAmelCase__ = d_inner lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout lowerCAmelCase__ = 
attention_dropout lowerCAmelCase__ = activation_dropout lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = type_vocab_size lowerCAmelCase__ = 2 lowerCAmelCase__ = num_labels lowerCAmelCase__ = num_choices lowerCAmelCase__ = scope lowerCAmelCase__ = initializer_std # Used in the tests to check the size of the first attention layer lowerCAmelCase__ = n_head # Used in the tests to check the size of the first hidden state lowerCAmelCase__ = self.d_model # Used in the tests to check the number of output hidden states/attentions lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: lowerCAmelCase__ = self.num_hidden_layers + 2 def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ = None if self.use_input_mask: lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ = None if self.use_token_type_ids: lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ = None lowerCAmelCase__ = None lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , 
activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ): """simple docstring""" lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = [input_ids, input_mask] lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ): """simple docstring""" lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = [input_ids, input_mask] 
lowerCAmelCase__ = model(__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) lowerCAmelCase__ = False lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ ) lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ): 
"""simple docstring""" lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = self.num_choices lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) lowerCAmelCase__ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ): """simple docstring""" lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , 
__magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ): """simple docstring""" lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ ) lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowerCAmelCase__ = model(__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) ,( lowerCAmelCase__ ) , ) = config_and_inputs lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :int = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) snake_case__ :Any = ( { 'feature-extraction': (TFFunnelBaseModel, TFFunnelModel), 'fill-mask': TFFunnelForMaskedLM, 'question-answering': TFFunnelForQuestionAnswering, 'text-classification': TFFunnelForSequenceClassification, 'token-classification': TFFunnelForTokenClassification, 'zero-shot': TFFunnelForSequenceClassification, } if is_tf_available() else {} ) snake_case__ :str = False snake_case__ :Any = False def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = TFFunnelModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" 
self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @require_tf class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :Any = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) snake_case__ :int = False snake_case__ :List[Any] = False def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ ) lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" self.config_tester.run_common_tests() def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
48
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE__ = { '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NezhaForNextSentencePrediction''', '''NezhaForMaskedLM''', '''NezhaForPreTraining''', '''NezhaForMultipleChoice''', '''NezhaForQuestionAnswering''', '''NezhaForSequenceClassification''', '''NezhaForTokenClassification''', '''NezhaModel''', '''NezhaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
47
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging UpperCAmelCase__ : Tuple = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = { "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Union[str, Any] = 'umt5' snake_case__ :Any = ['past_key_values'] def __init__( self : List[Any] , __magic_name__ : Tuple=250112 , __magic_name__ : str=512 , __magic_name__ : int=64 , __magic_name__ : str=1024 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=6 , __magic_name__ : Dict=32 , __magic_name__ : Optional[Any]=128 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=1E-6 , __magic_name__ : Optional[int]=1.0 , __magic_name__ : Dict="gated-gelu" , __magic_name__ : List[str]=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[int]="T5Tokenizer" , __magic_name__ : str=True , __magic_name__ : int=0 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : str=0 , **__magic_name__ : Any , ): """simple docstring""" super().__init__( is_encoder_decoder=__magic_name__ , tokenizer_class=__magic_name__ , tie_word_embeddings=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , ) lowerCAmelCase__ = vocab_size lowerCAmelCase__ = d_model lowerCAmelCase__ = d_kv lowerCAmelCase__ = d_ff lowerCAmelCase__ = num_layers lowerCAmelCase__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowerCAmelCase__ = num_heads lowerCAmelCase__ = relative_attention_num_buckets lowerCAmelCase__ = relative_attention_max_distance lowerCAmelCase__ = dropout_rate lowerCAmelCase__ = layer_norm_epsilon lowerCAmelCase__ = 
initializer_factor lowerCAmelCase__ = feed_forward_proj lowerCAmelCase__ = use_cache lowerCAmelCase__ = self.feed_forward_proj.split("-" ) lowerCAmelCase__ = act_info[-1] lowerCAmelCase__ = act_info[0] == "gated" if len(__magic_name__ ) > 1 and act_info[0] != "gated" or len(__magic_name__ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. " "'gated-gelu' or 'relu'" ) if feed_forward_proj == "gated-gelu": lowerCAmelCase__ = "gelu_new" @property def __SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return self.d_model @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" return self.num_heads @property def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return self.num_layers class A ( SCREAMING_SNAKE_CASE__ ): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" lowerCAmelCase__ = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: lowerCAmelCase__ = "past_encoder_sequence + sequence" lowerCAmelCase__ = {0: "batch"} lowerCAmelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"} lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(__magic_name__ , direction="inputs" ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" return 13 @property def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" return 5E-4
48
0
"""simple docstring""" def lowercase__ ( snake_case_ :str ): assert column_title.isupper() __UpperCAmelCase = 0 __UpperCAmelCase = len(snake_case_ ) - 1 __UpperCAmelCase = 0 while index >= 0: __UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , snake_case_ ) answer += value power += 1 index -= 1 return answer if __name__ == "__main__": from doctest import testmod testmod()
49
'''simple docstring''' from __future__ import annotations from collections import Counter from random import random class A : def __init__( self : Optional[int] ): """simple docstring""" lowerCAmelCase__ = {} def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = {} def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ): """simple docstring""" if nodea not in self.connections: self.add_node(__magic_name__ ) if nodea not in self.connections: self.add_node(__magic_name__ ) lowerCAmelCase__ = probability def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" return list(self.connections ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]: '''simple docstring''' lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = Counter(graph.get_nodes() ) lowerCAmelCase__ = start for _ in range(UpperCamelCase_ ): lowerCAmelCase__ = graph.transition(UpperCamelCase_ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
48
0
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class UpperCamelCase__ (a ): '''simple docstring''' _UpperCamelCase = (DEISMultistepScheduler,) _UpperCamelCase = (('num_inference_steps', 25),) def UpperCamelCase_ ( self ,**_lowerCAmelCase ): lowerCamelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """solver_order""": 2, } config.update(**_lowerCAmelCase ) return config def UpperCamelCase_ ( self ,_lowerCAmelCase=0 ,**_lowerCAmelCase ): lowerCamelCase__ = dict(self.forward_default_kwargs ) lowerCamelCase__ = kwargs.pop("""num_inference_steps""" ,_lowerCAmelCase ) lowerCamelCase__ = self.dummy_sample lowerCamelCase__ = 0.1 * sample lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowerCamelCase__ = self.get_scheduler_config(**_lowerCAmelCase ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) lowerCamelCase__ = scheduler_class.from_pretrained(_lowerCAmelCase ) new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals lowerCamelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCamelCase__ , lowerCamelCase__ = sample, sample for t in range(_lowerCAmelCase ,time_step + scheduler.config.solver_order + 1 ): lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample lowerCamelCase__ = new_scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample assert 
torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ,_lowerCAmelCase=0 ,**_lowerCAmelCase ): lowerCamelCase__ = dict(self.forward_default_kwargs ) lowerCamelCase__ = kwargs.pop("""num_inference_steps""" ,_lowerCAmelCase ) lowerCamelCase__ = self.dummy_sample lowerCamelCase__ = 0.1 * sample lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCAmelCase ) lowerCamelCase__ = scheduler_class.from_pretrained(_lowerCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_lowerCAmelCase ) # copy over dummy past residual (must be after setting timesteps) lowerCamelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order] lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample lowerCamelCase__ = new_scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCamelCase_ ( self ,_lowerCAmelCase=None ,**_lowerCAmelCase ): if scheduler is None: lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(**_lowerCAmelCase ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(**_lowerCAmelCase ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ = 10 lowerCamelCase__ = self.dummy_model() 
lowerCamelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ).prev_sample return sample def UpperCamelCase_ ( self ): lowerCamelCase__ = dict(self.forward_default_kwargs ) lowerCamelCase__ = kwargs.pop("""num_inference_steps""" ,_lowerCAmelCase ) for scheduler_class in self.scheduler_classes: lowerCamelCase__ = self.get_scheduler_config() lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ = self.dummy_sample lowerCamelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_lowerCAmelCase ,"""set_timesteps""" ): scheduler.set_timesteps(_lowerCAmelCase ) elif num_inference_steps is not None and not hasattr(_lowerCAmelCase ,"""set_timesteps""" ): lowerCamelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCamelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10] lowerCamelCase__ = dummy_past_residuals[: scheduler.config.solver_order] lowerCamelCase__ = scheduler.timesteps[5] lowerCamelCase__ = scheduler.timesteps[6] lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def UpperCamelCase_ ( self ): # make sure that iterating over schedulers with same config names gives same results # for defaults lowerCamelCase__ = DEISMultistepScheduler(**self.get_scheduler_config() ) lowerCamelCase__ = self.full_loop(scheduler=_lowerCAmelCase ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 lowerCamelCase__ = 
DPMSolverSinglestepScheduler.from_config(scheduler.config ) lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(scheduler.config ) lowerCamelCase__ = UniPCMultistepScheduler.from_config(scheduler.config ) lowerCamelCase__ = DEISMultistepScheduler.from_config(scheduler.config ) lowerCamelCase__ = self.full_loop(scheduler=_lowerCAmelCase ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 def UpperCamelCase_ ( self ): for timesteps in [25, 50, 1_00, 9_99, 10_00]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def UpperCamelCase_ ( self ): self.check_over_configs(thresholding=_lowerCAmelCase ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=_lowerCAmelCase ,prediction_type=_lowerCAmelCase ,sample_max_value=_lowerCAmelCase ,algorithm_type="""deis""" ,solver_order=_lowerCAmelCase ,solver_type=_lowerCAmelCase ,) def UpperCamelCase_ ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=_lowerCAmelCase ,solver_type=_lowerCAmelCase ,prediction_type=_lowerCAmelCase ,algorithm_type=_lowerCAmelCase ,) lowerCamelCase__ = self.full_loop( solver_order=_lowerCAmelCase ,solver_type=_lowerCAmelCase ,prediction_type=_lowerCAmelCase ,algorithm_type=_lowerCAmelCase ,) assert not torch.isnan(_lowerCAmelCase ).any(), "Samples have nan numbers" def UpperCamelCase_ ( self ): self.check_over_configs(lower_order_final=_lowerCAmelCase ) self.check_over_configs(lower_order_final=_lowerCAmelCase ) def UpperCamelCase_ ( self ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]: 
self.check_over_forward(num_inference_steps=_lowerCAmelCase ,time_step=0 ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.full_loop() lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.2_3916 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.full_loop(prediction_type="""v_prediction""" ) lowerCamelCase__ = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_mean.item() - 0.091 ) < 1E-3 def UpperCamelCase_ ( self ): lowerCamelCase__ = self.scheduler_classes[0] lowerCamelCase__ = self.get_scheduler_config(thresholding=_lowerCAmelCase ,dynamic_thresholding_ratio=0 ) lowerCamelCase__ = scheduler_class(**_lowerCAmelCase ) lowerCamelCase__ = 10 lowerCamelCase__ = self.dummy_model() lowerCamelCase__ = self.dummy_sample_deter.half() scheduler.set_timesteps(_lowerCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase__ = model(_lowerCAmelCase ,_lowerCAmelCase ) lowerCamelCase__ = scheduler.step(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ).prev_sample assert sample.dtype == torch.floataa
50
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration UpperCAmelCase__ : Optional[Any] = pytest.mark.integration UpperCAmelCase__ : str = {"comet"} UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None UpperCAmelCase__ : Optional[int] = {"code_eval"} UpperCAmelCase__ : List[Any] = os.name == "nt" UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"} UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]: '''simple docstring''' @wraps(UpperCamelCase_ ) def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("\"test requires Fairseq\"" ) else: test_case(self , UpperCamelCase_ ) return wrapper def A ( UpperCamelCase_ : List[Any] ) -> str: '''simple docstring''' @wraps(UpperCamelCase_ ) def wrapper(self : Optional[int] , UpperCamelCase_ : int ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("\"test requires transformers\"" ) else: test_case(self , UpperCamelCase_ ) return wrapper def A ( UpperCamelCase_ : Any ) -> int: '''simple docstring''' @wraps(UpperCamelCase_ ) def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("\"test not supported on Windows\"" ) else: test_case(self , UpperCamelCase_ ) return wrapper def A ( ) -> Tuple: '''simple docstring''' lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )] return [{"testcase_name": x, "metric_name": x} 
for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @local class A ( parameterized.TestCase ): snake_case__ :Union[str, Any] = {} snake_case__ :Optional[Any] = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = "[...]" lowerCAmelCase__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path ) lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ ) # check parameters lowerCAmelCase__ = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ): with self.use_local_metrics(): try: lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ): """simple docstring""" lowerCAmelCase__ = "[...]" lowerCAmelCase__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path ) # run doctest with self.use_local_metrics(): lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def 
__SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ): """simple docstring""" if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ): yield else: yield @contextmanager def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ): return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ ) with patch("datasets.load_metric" ) as mock_load_metric: lowerCAmelCase__ = load_local_metric yield @classmethod def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ): """simple docstring""" def wrapper(__magic_name__ : Dict ): lowerCAmelCase__ = contextmanager(__magic_name__ ) lowerCAmelCase__ = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("bleurt" ) def A ( UpperCamelCase_ : str ) -> Any: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags class A ( SCREAMING_SNAKE_CASE__ ): def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ): """simple docstring""" assert len(input_dict["input_ids"] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor" ) as mock_create_predictor: lowerCAmelCase__ = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore" ) def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]: '''simple docstring''' import torch def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) ) # mock get_model which is supposed to do download 
a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model" ), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: lowerCAmelCase__ = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet" ) def A ( UpperCamelCase_ : Optional[int] ) -> Any: '''simple docstring''' def load_from_checkpoint(UpperCamelCase_ : Tuple ): class A : def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ): """simple docstring""" assert len(__magic_name__ ) == 2 lowerCAmelCase__ = [0.19, 0.92] return scores, sum(__magic_name__ ) / len(__magic_name__ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model" ) as mock_download_model: lowerCAmelCase__ = None with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint: lowerCAmelCase__ = load_from_checkpoint yield def A ( ) -> Tuple: '''simple docstring''' lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) ) lowerCAmelCase__ = "ERROR" lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ): metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
48
0
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : Any = { 'configuration_autoformer': [ 'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AutoformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] = [ 'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys a__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
51
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool UpperCAmelCase__ : int = { "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab", "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn", "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi", "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab", "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab", "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng", "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn", "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl", "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn", "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab", "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn", "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn", "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab", "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn", "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", 
"Dzongkha": "dzo_Tibt", "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn", "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn", "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn", "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn", "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr", "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva", "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn", "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn", "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan", "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda", "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor", "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl", "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn", "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn", "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo", "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn", "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn", "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", "Ganda": "lug_Latn", "Luo": "luo_Latn", "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva", "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva", "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl", "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng", "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": 
"mya_Mymr", "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn", "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn", "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya", "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn", "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn", "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn", "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn", "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr", "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn", "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn", "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn", "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn", "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl", "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai", "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng", "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn", "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn", "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl", "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn", "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn", "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn", "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant", "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn", } class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Tuple = 
'facebook/nllb-200-distilled-600M' snake_case__ :Optional[Any] = ( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) snake_case__ :List[Any] = 'translator' snake_case__ :List[Any] = AutoTokenizer snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM snake_case__ :List[str] = LANGUAGE_CODES snake_case__ :List[Any] = ['text', 'text', 'text'] snake_case__ :List[Any] = ['text'] def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ): """simple docstring""" if src_lang not in self.lang_to_code: raise ValueError(f"""{src_lang} is not a supported language.""" ) if tgt_lang not in self.lang_to_code: raise ValueError(f"""{tgt_lang} is not a supported language.""" ) lowerCAmelCase__ = self.lang_to_code[src_lang] lowerCAmelCase__ = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ): """simple docstring""" return self.model.generate(**__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ): """simple docstring""" return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
48
0
"""simple docstring""" from __future__ import annotations def __A ( a_ :list[int]) -> int: if not nums: return 0 __a : Any = nums[0] __a : Optional[Any] = 0 for num in nums[1:]: __a , __a : Optional[Any] = ( max_excluding + num, max(a_ , a_), ) return max(a_ , a_) if __name__ == "__main__": import doctest doctest.testmod()
52
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : int = logging.get_logger(__name__) class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Any = 'timm_backbone' def __init__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=True , __magic_name__ : str=True , __magic_name__ : List[Any]=None , **__magic_name__ : Tuple , ): """simple docstring""" super().__init__(**__magic_name__ ) lowerCAmelCase__ = backbone lowerCAmelCase__ = num_channels lowerCAmelCase__ = features_only lowerCAmelCase__ = use_pretrained_backbone lowerCAmelCase__ = True lowerCAmelCase__ = out_indices if out_indices is not None else (-1,)
48
0
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _snake_case : Any = logging.get_logger(__name__) def a_ ( lowerCAmelCase_ : str ): __lowerCAmelCase = SwinConfig.from_pretrained( 'microsoft/swin-tiny-patch4-window7-224', out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) __lowerCAmelCase = MaskFormerConfig(backbone_config=lowerCAmelCase_ ) __lowerCAmelCase = 'huggingface/label-files' if "ade20k-full" in model_name: # this should be ok __lowerCAmelCase = 847 __lowerCAmelCase = 'maskformer-ade20k-full-id2label.json' elif "ade" in model_name: # this should be ok __lowerCAmelCase = 150 __lowerCAmelCase = 'ade20k-id2label.json' elif "coco-stuff" in model_name: # this should be ok __lowerCAmelCase = 171 __lowerCAmelCase = 'maskformer-coco-stuff-id2label.json' elif "coco" in model_name: # TODO __lowerCAmelCase = 133 __lowerCAmelCase = 'coco-panoptic-id2label.json' elif "cityscapes" in model_name: # this should be ok __lowerCAmelCase = 19 __lowerCAmelCase = 'cityscapes-id2label.json' elif "vistas" in model_name: # this should be ok __lowerCAmelCase = 65 __lowerCAmelCase = 'mapillary-vistas-id2label.json' __lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ), 'r' ) ) __lowerCAmelCase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} return config def a_ ( lowerCAmelCase_ : Tuple ): __lowerCAmelCase = [] # stem # fmt: off rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') ) rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') ) 
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') ) rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", 
F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") ) # FPN rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') ) rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') ) for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ): rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") ) 
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") ) rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') ) rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") ) # cross-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") ) # MLP 1 
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") ) # MLP 2 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") ) # layernorm 1 (self-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") ) # layernorm 3 (final layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") ) rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') ) 
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') ) # heads on top rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') ) rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') ) rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') ) rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') ) for i in range(3 ): rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") ) # fmt: on return rename_keys def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : Tuple ): __lowerCAmelCase = dct.pop(lowerCAmelCase_ ) __lowerCAmelCase = val def a_ ( lowerCAmelCase_ : Union[str, Any], lowerCAmelCase_ : int ): __lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __lowerCAmelCase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" ) __lowerCAmelCase = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[:dim, :] __lowerCAmelCase = in_proj_bias[: dim] __lowerCAmelCase = in_proj_weight[ dim : dim * 2, : ] __lowerCAmelCase = in_proj_bias[ dim : dim * 2 ] __lowerCAmelCase = 
in_proj_weight[ -dim :, : ] __lowerCAmelCase = in_proj_bias[-dim :] # fmt: on def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : Dict ): # fmt: off __lowerCAmelCase = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" ) __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[: hidden_size, :] __lowerCAmelCase = in_proj_bias[:config.hidden_size] __lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __lowerCAmelCase = in_proj_weight[-hidden_size :, :] __lowerCAmelCase = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" ) __lowerCAmelCase = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[: hidden_size, :] __lowerCAmelCase = in_proj_bias[:config.hidden_size] __lowerCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] __lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __lowerCAmelCase = in_proj_weight[-hidden_size :, :] __lowerCAmelCase = in_proj_bias[-hidden_size :] # fmt: on def a_ ( ): __lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_, 
stream=lowerCAmelCase_ ).raw ) return im @torch.no_grad() def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : str, lowerCAmelCase_ : str, lowerCAmelCase_ : bool = False ): __lowerCAmelCase = get_maskformer_config(lowerCAmelCase_ ) # load original state_dict with open(lowerCAmelCase_, 'rb' ) as f: __lowerCAmelCase = pickle.load(lowerCAmelCase_ ) __lowerCAmelCase = data['model'] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __lowerCAmelCase = create_rename_keys(lowerCAmelCase_ ) for src, dest in rename_keys: rename_key(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ) read_in_swin_q_k_v(lowerCAmelCase_, config.backbone_config ) read_in_decoder_q_k_v(lowerCAmelCase_, lowerCAmelCase_ ) # update to torch tensors for key, value in state_dict.items(): __lowerCAmelCase = torch.from_numpy(lowerCAmelCase_ ) # load 🤗 model __lowerCAmelCase = MaskFormerForInstanceSegmentation(lowerCAmelCase_ ) model.eval() for name, param in model.named_parameters(): print(lowerCAmelCase_, param.shape ) __lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(lowerCAmelCase_, strict=lowerCAmelCase_ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(lowerCAmelCase_ ) == 0, F"""Unexpected keys: {unexpected_keys}""" # verify results __lowerCAmelCase = prepare_img() if "vistas" in model_name: __lowerCAmelCase = 65 elif "cityscapes" in model_name: __lowerCAmelCase = 6_5535 else: __lowerCAmelCase = 255 __lowerCAmelCase = True if 'ade' in model_name else False __lowerCAmelCase = MaskFormerImageProcessor(ignore_index=lowerCAmelCase_, reduce_labels=lowerCAmelCase_ ) __lowerCAmelCase = image_processor(lowerCAmelCase_, return_tensors='pt' ) __lowerCAmelCase = model(**lowerCAmelCase_ ) print('Logits:', outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": __lowerCAmelCase = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, 
-4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3], lowerCAmelCase_, atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" ) Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) image_processor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: print('Pushing model and image processor to the hub...' ) model.push_to_hub(F"""nielsr/{model_name}""" ) image_processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": _snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _snake_case : List[str] = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
53
"""Image-captioning tool: wraps a BLIP vision-to-text checkpoint as a PipelineTool."""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class A(PipelineTool):
    """Generates an English description of an input image.

    NOTE(review): this block had been machine-scrubbed — the base class was an
    undefined placeholder, every class attribute was named ``snake_case__``
    (so later assignments silently shadowed earlier ones), all three pipeline
    methods shared the name ``__SCREAMING_SNAKE_CASE``, and ``decode`` passed
    its own argument as ``skip_special_tokens``.  Restored the standard
    ``PipelineTool`` attribute/method contract (``encode``/``forward``/
    ``decode``) and the digit-mangled ``AutoModelForVisionaSeq`` import.
    """

    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        """Require the vision backend (PIL) before the base class loads anything."""
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        """Turn a PIL image into model-ready pixel tensors."""
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        """Run autoregressive generation on the encoded inputs."""
        return self.model.generate(**inputs)

    def decode(self, outputs):
        """Decode the generated token ids into a clean caption string."""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
48
0
def a__(lowercase__):
    """Return True if *lowercase__* is a valid dotted-quad IPv4 address.

    A valid address has exactly four dot-separated, purely numeric octets,
    each in the range 0..255 (255 is a legal octet value — e.g. the broadcast
    address 255.255.255.255), so the previous upper bound of 254 was wrong.
    The original body also referenced an undefined name (``ip_va_address``)
    and converted the whole address string with ``int`` instead of each octet.

    >>> a__("192.168.0.23")
    True
    >>> a__("255.255.255.255")
    True
    >>> a__("1.2.3.256")
    False
    >>> a__("1.2.3")
    False
    >>> a__("1.2.3.a")
    False
    """
    # Non-numeric fields (empty, signed, alphabetic) are dropped, which makes
    # the octet count fall short of 4 and fails validation.
    octets = [int(field) for field in lowercase__.split(".") if field.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


# Backwards-compatible alias: the CLI below (and the pre-scrubbing call site)
# used this descriptive name.
is_ip_va_address_valid = a__

if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
54
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ : Tuple = logging.get_logger(__name__) UpperCAmelCase__ : Union[str, Any] = "▁" UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"} UpperCAmelCase__ : Union[str, Any] = { "vocab_file": { "facebook/mbart-large-50-one-to-many-mmt": ( "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model" ), } } UpperCAmelCase__ : Optional[Any] = { "facebook/mbart-large-50-one-to-many-mmt": 10_24, } # fmt: off UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Optional[int] = VOCAB_FILES_NAMES snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP snake_case__ :Tuple = ['input_ids', 'attention_mask'] snake_case__ :List[int] = [] snake_case__ :List[int] = [] def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ): """simple docstring""" lowerCAmelCase__ = 
AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , ) lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__magic_name__ ) ) lowerCAmelCase__ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowerCAmelCase__ = 1 lowerCAmelCase__ = len(self.sp_model ) lowerCAmelCase__ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ ) } lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()} lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX" lowerCAmelCase__ = self.lang_code_to_id[self._src_lang] lowerCAmelCase__ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def __SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return self._src_lang @src_lang.setter def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Dict ): """simple docstring""" lowerCAmelCase__ = self.__dict__.copy() lowerCAmelCase__ = None return state def __setstate__( self : List[Any] , __magic_name__ : Dict ): """simple docstring""" lowerCAmelCase__ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): lowerCAmelCase__ = {} lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in 
range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ): """simple docstring""" return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ): """simple docstring""" lowerCAmelCase__ = [] lowerCAmelCase__ = "" lowerCAmelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__magic_name__ ) + token lowerCAmelCase__ = True lowerCAmelCase__ = [] else: current_sub_tokens.append(__magic_name__ ) lowerCAmelCase__ = False out_string += self.sp_model.decode(__magic_name__ ) return out_string.strip() def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(__magic_name__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ = os.path.join( __magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __magic_name__ ) elif not 
os.path.isfile(self.vocab_file ): with open(__magic_name__ , "wb" ) as fi: lowerCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(__magic_name__ ) return (out_vocab_file,) def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) lowerCAmelCase__ = [1] * len(self.prefix_tokens ) lowerCAmelCase__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) lowerCAmelCase__ = src_lang lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ ) lowerCAmelCase__ = tgt_lang_id return inputs def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = 
"ro_RO" , **__magic_name__ : Union[str, Any] , ): """simple docstring""" lowerCAmelCase__ = src_lang lowerCAmelCase__ = tgt_lang return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = self.lang_code_to_id[src_lang] lowerCAmelCase__ = [self.cur_lang_code_id] lowerCAmelCase__ = [self.eos_token_id] def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = self.lang_code_to_id[tgt_lang] lowerCAmelCase__ = [self.cur_lang_code_id] lowerCAmelCase__ = [self.eos_token_id]
48
0
"""Build whole-word-masking reference files for Chinese LM training data.

For every input line, records the positions of BERT sub-tokens that continue
a whole Chinese word (as segmented by LTP), so a data collator can mask whole
words instead of individual sub-tokens.
"""
import argparse
import json
from typing import List


def _is_chinese_char(cp):
    """Return True if code point ``cp`` lies in a CJK ideograph block."""
    # CJK Unified Ideographs, Extensions A-F and the compatibility blocks.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    """Return 1 if every character of ``word`` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    """Collect the multi-character, fully-Chinese tokens out of ``tokens``."""
    word_set = set()
    for token in tokens:
        # Only whole words (len > 1) made up entirely of Chinese chars matter.
        if len(token) > 1 and is_chinese(token):
            word_set.add(token)
    return list(word_set)


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    """Prefix with ``##`` the BERT sub-tokens that continue a whole Chinese word.

    ``bert_tokens`` is modified in place and also returned.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest possible whole word first (greedy match).
            span = min(end - start, max_word_len)
            for i in range(span, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer):
    """For each line, return the positions of sub-tokens inside whole Chinese words."""
    ltp_res = []
    for i in range(0, len(lines), 100):
        # LTP word segmentation ("cws" task), batched to keep memory bounded.
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    # Heavy third-party dependencies are imported lazily so the pure helpers
    # above stay importable without `ltp`/`transformers` installed.
    from ltp import LTP
    from transformers.models.bert.tokenization_bert import BertTokenizer

    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # avoid delimiter like '\u2029'
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]

    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
55
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict: '''simple docstring''' lowerCAmelCase__ = 0 if start < end: lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = a[end] lowerCAmelCase__ = a[pivot] lowerCAmelCase__ = temp lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 ) count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ ) return count def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict: '''simple docstring''' lowerCAmelCase__ = 0 lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase__ = a[end] lowerCAmelCase__ = a[pivot] lowerCAmelCase__ = temp lowerCAmelCase__ = start - 1 for index in range(UpperCamelCase_ , UpperCamelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCAmelCase__ = new_pivot_index + 1 lowerCAmelCase__ = a[new_pivot_index] lowerCAmelCase__ = a[index] lowerCAmelCase__ = temp lowerCAmelCase__ = a[new_pivot_index + 1] lowerCAmelCase__ = a[end] lowerCAmelCase__ = temp return new_pivot_index + 1, count UpperCAmelCase__ : Tuple = TemporaryFile() UpperCAmelCase__ : List[str] = 1_00 # 1000 elements are to be sorted UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p) np.save(outfile, X) print("The array is") print(X) outfile.seek(0) # using the same array UpperCAmelCase__ : Optional[Any] = np.load(outfile) UpperCAmelCase__ : Any = len(M) - 1 UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution" "is :" ) print(z)
48
0
'''simple docstring''' from __future__ import annotations from collections import namedtuple def _a (lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> tuple: """simple docstring""" __snake_case = namedtuple('result' , 'name value' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('Only one argument must be 0' ) elif power < 0: raise ValueError( 'Power cannot be negative in any electrical/electronics system' ) elif voltage == 0: return result('voltage' , power / current ) elif current == 0: return result('current' , power / voltage ) elif power == 0: return result('power' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
56
"""Convert an original GroupViT checkpoint to the HF Transformers format."""
import argparse

import torch


def rename_key(name):
    """Map an original GroupViT checkpoint key to its HF Transformers name."""
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rewrite the original state dict in place, splitting fused qkv/in_proj tensors.

    NOTE(review): the assignment targets below were lost in the degraded
    source and are restored to the upstream HF conversion-script key layout —
    confirm against the upstream script.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require
            # special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require
            # special treatment: we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict


def prepare_img():
    """Download the standard COCO cats test image used for verification."""
    # Lazy imports keep the module importable without requests/PIL installed.
    import requests
    from PIL import Image

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_groupvit_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False
):
    """Copy/paste/tweak the original GroupViT weights into the HF format and verify."""
    from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel

    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
    )

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        # Fixed: the previous default "groupvit-gccy-fcc" is not a supported
        # model name and made the default invocation raise ValueError.
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
48
0
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin

# Small SentencePiece fixture used to exercise the tokenizers offline.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Slow/fast Pegasus tokenizer tests (google/pegasus-large settings).

    NOTE(review): every method in the degraded source was named `_a` (so only
    the last survived); names are restored to the conventional
    TokenizerTesterMixin hooks — confirm against upstream.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        """Token/id round trip for the EOS token."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        """The slow and fast tokenizers must agree on unknown/special tokens."""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(rust_ids, py_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # Padded tails are written as `prefix + [0] * n` instead of literal
        # zero runs; the resulting values are identical.
        expected_encoding = {
            "input_ids": [
                [38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787,
                 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304,
                 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143,
                 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787,
                 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1],
                [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039,
                 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1] + [0] * 54,
                [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1] + [0] * 72,
            ],
            "attention_mask": [
                [1] * 83,
                [1] * 29 + [0] * 54,
                [1] * 11 + [0] * 72,
            ],
        }

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Pegasus tokenizer tests with BigBird-Pegasus settings (offset 0, [MASK])."""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(rust_ids, py_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        """Pins the ids produced for a reference sentence."""
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
57
'''simple docstring''' from __future__ import annotations from functools import lru_cache from math import ceil UpperCAmelCase__ : Optional[Any] = 1_00 UpperCAmelCase__ : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) UpperCAmelCase__ : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=1_00 ) def A ( UpperCamelCase_ : int ) -> set[int]: '''simple docstring''' if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} lowerCAmelCase__ = set() lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A ( UpperCamelCase_ : int = 50_00 ) -> int | None: '''simple docstring''' for number_to_partition in range(1 , UpperCamelCase_ ): if len(partition(UpperCamelCase_ ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F"{solution() = }")
48
0
"""Tests for utils/check_copies.py (the `# Copied from ...` consistency checker)."""
import os
import re
import shutil
import sys
import tempfile
import unittest

import black

git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
# NOTE(review): the internal newlines/indentation of this literal were lost in
# the degraded source and are restored to BertLMPredictionHead's body -- confirm.
REFERENCE_CODE = """    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        """Point check_copies at a scratch copy of modeling_bert.py."""
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write a candidate copy to disk and check (or overwrite) it."""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # NOTE(review): the degraded source read `black.TargetVersion.PYaa`;
        # restored to PY35 per the upstream test -- confirm.
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(fname, overwrite=True)
            with open(fname, "r") as f:
                # Bug fix: was `assertTrue(f.read(), ...)`, whose second
                # argument is only a failure message -- nothing was compared.
                self.assertEqual(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        link_unchanged_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
58
"""Character-level tokenizer for MGP-STR.

The vocabulary is a JSON file mapping single characters to integer ids;
tokenization simply splits the input string into characters.
"""
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class A(PreTrainedTokenizer):
    """MGP-STR tokenizer: one token per character of the input text.

    Args:
        vocab_file: Path to the JSON vocabulary (char -> id).
        unk_token / bos_token / eos_token / pad_token: Special tokens;
            MGP-STR uses "[GO]" for unk/bos/pad and "[s]" for eos.
    """

    # Class attributes consumed by the PreTrainedTokenizer machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            # char -> id mapping loaded from disk
            self.vocab = json.load(vocab_handle)
        # id -> char reverse mapping for decoding
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary (excludes added tokens)."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocabulary: base entries plus added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split ``text`` into single-character tokens."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a character to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its character (``None`` if the id is unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary JSON into ``save_directory`` and return its path.

        Logs an error and returns ``None`` if ``save_directory`` is not a
        directory (mirrors the upstream best-effort behavior).
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
48
0
"""Lazy import structure for the FNet model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Mapping of submodule name -> public symbols, consumed by _LazyModule below.
# Optional entries are added only when the corresponding dependency is present.
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is
    # replaced by a _LazyModule that resolves these names on first access.
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
"""Project Euler problem 21: sum of all amicable numbers below a limit.

https://projecteuler.net/problem=21
"""
from math import sqrt


def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (divisors strictly < n).

    Iterates only up to sqrt(n), adding each divisor pair (i, n // i);
    a perfect-square root is counted once. The final ``- n`` removes ``n``
    itself (added via the pair (1, n)).
    """
    total = 0
    root = sqrt(n)  # hoisted: loop-invariant
    for i in range(1, int(root + 1)):
        if n % i == 0 and i != root:
            total += i + n // i
        elif i == root:
            # perfect square: count the root only once
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below ``limit``.

    ``i`` is amicable when sum_of_divisors(i) != i and
    sum_of_divisors(sum_of_divisors(i)) == i.
    """
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
48
0
"""Lazy import structure for the Vision Transformer (ViT) model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Mapping of submodule name -> public symbols, consumed by _LazyModule below.
# Optional entries are added only when the corresponding dependency is present.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is
    # replaced by a _LazyModule that resolves these names on first access.
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
60
"""Principal Component Analysis (PCA) and Linear Discriminant Analysis (LDA)
for dimensionality reduction, with self-tests.

Data layout note: ``features`` is column-major, shape (n_features, n_samples);
``labels`` has one integer class id per sample column.
"""
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single column of shape (size, 1)."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Within-class covariance: sum of per-class scatter matrices, / n_samples."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:  # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:  # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class covariance: class-size-weighted scatter of class means
    around the global mean, / n_samples."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:  # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:  # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project ``features`` onto the top ``dimensions`` principal components.

    Returns the projected data of shape (dimensions, n_samples).
    Raises AssertionError when the dataset is empty.
    """
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), then only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project ``features`` onto ``dimensions`` discriminant directions.

    Solves the generalized eigenproblem between/within class covariance via
    scipy's eigh, then orthonormalizes the chosen directions with an SVD.
    Raises AssertionError when dimensions >= classes or the dataset is empty.
    """
    # LDA can extract at most (classes - 1) meaningful directions
    assert classes > dimensions

    # Check if features have been already loaded
    # (fixed: original tested the bound method `features.any`, always truthy)
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    """LDA must refuse dimensions >= classes with an AssertionError."""
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    """PCA output on this degenerate dataset differs from the (wrong) expected
    matrix, so the inner AssertionError must fire."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
48
0