Dataset preview. Each row holds two Python source samples (`code` and `style_context`), an integer style id for each, and a binary `label`. Column summary (string columns report character-length range, integer columns report value range):

| column | dtype | min | max |
| --- | --- | --- | --- |
| `code` | string (length) | 86 | 54.5k |
| `code_codestyle` | int64 | 0 | 371 |
| `style_context` | string (length) | 87 | 49.2k |
| `style_context_codestyle` | int64 | 0 | 349 |
| `label` | int64 | 0 | 1 |
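As a minimal sketch of how rows with this schema could be inspected, assuming the dataset is published on the Hugging Face Hub (the identifier below is a placeholder, since the real path is not shown in this preview):

```python
from datasets import load_dataset

# Placeholder dataset path: substitute the actual identifier for this dataset.
ds = load_dataset("username/code-style-pairs", split="train")

row = ds[0]
print(row["label"])                # binary label, 0 or 1
print(row["code_codestyle"])       # integer style id in [0, 371]
print(row["code"][:200])           # first 200 characters of the code sample
```

The rows below are shown field by field, in the column order of the table above.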
Row 1

`code`:

```python
'''simple docstring'''
import unittest

from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class UpperCamelCase_ (unittest.TestCase ):
    """simple docstring"""

    @require_torch
    def _a ( self : Any ):
        """simple docstring"""
        A_ : Optional[Any] = pipeline(
            task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' )
        A_ : str = load_dataset('''ashraq/esc50''' )
        A_ : str = dataset['''train''']['''audio'''][-1]['''array''']
        A_ : int = audio_classifier(_lowerCamelCase , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
        self.assertEqual(
            nested_simplify(_lowerCamelCase ) ,
            [{'''score''': 0.5_01, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_99, '''label''': '''Sound of vaccum cleaner'''}] , )

    @unittest.skip('''No models are available in TF''' )
    def _a ( self : int ):
        """simple docstring"""
        pass

    @slow
    @require_torch
    def _a ( self : Tuple ):
        """simple docstring"""
        A_ : List[Any] = pipeline(
            task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio of a dog
        A_ : List[Any] = load_dataset('''ashraq/esc50''' )
        A_ : str = dataset['''train''']['''audio'''][-1]['''array''']
        A_ : Dict = audio_classifier(_lowerCamelCase , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
        self.assertEqual(
            nested_simplify(_lowerCamelCase ) ,
            [
                {'''score''': 0.9_99, '''label''': '''Sound of a dog'''},
                {'''score''': 0.0_01, '''label''': '''Sound of vaccum cleaner'''},
            ] , )
        A_ : str = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] )
        self.assertEqual(
            nested_simplify(_lowerCamelCase ) ,
            [
                [
                    {'''score''': 0.9_99, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_01, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5 , )
        A_ : Dict = audio_classifier(
            [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 )
        self.assertEqual(
            nested_simplify(_lowerCamelCase ) ,
            [
                [
                    {'''score''': 0.9_99, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_01, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5 , )

    @unittest.skip('''No models are available in TF''' )
    def _a ( self : str ):
        """simple docstring"""
        pass
```
`code_codestyle`: 4
`style_context`:

```python
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


snake_case__ = logging.get_logger(__name__)

snake_case__ = """▁"""

snake_case__ = {
    """vocab_file""": """vocab.json""",
    """spm_file""": """sentencepiece.bpe.model""",
}

snake_case__ = {
    """vocab_file""": {
        """facebook/s2t-small-librispeech-asr""": (
            """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"""
        ),
    },
    """spm_file""": {
        """facebook/s2t-small-librispeech-asr""": (
            """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"""
        )
    },
}

snake_case__ = {
    """facebook/s2t-small-librispeech-asr""": 10_24,
}

snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""]

snake_case__ = {"""mustc""": MUSTC_LANGS}


class UpperCamelCase_ (a__ ):
    """simple docstring"""

    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = MAX_MODEL_INPUT_SIZES
    _lowerCAmelCase = ['input_ids', 'attention_mask']
    _lowerCAmelCase = []

    def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ):
        """simple docstring"""
        A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
        A_ : Optional[int] = do_upper_case
        A_ : Tuple = do_lower_case
        A_ : Tuple = load_json(_lowerCamelCase )
        A_ : Tuple = {v: k for k, v in self.encoder.items()}
        A_ : List[Any] = spm_file
        A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs )
        if lang_codes is not None:
            A_ : Any = lang_codes
            A_ : Optional[Any] = LANGUAGES[lang_codes]
            A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs]
            A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
            A_ : Optional[int] = self.lang_tokens
            A_ : int = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            A_ : Dict = {}

    @property
    def _a ( self : Tuple ):
        """simple docstring"""
        return len(self.encoder )

    @property
    def _a ( self : int ):
        """simple docstring"""
        return self._tgt_lang

    @tgt_lang.setter
    def _a ( self : List[str] , _lowerCamelCase : Any ):
        """simple docstring"""
        A_ : int = new_tgt_lang
        self.set_tgt_lang_special_tokens(_lowerCamelCase )

    def _a ( self : Tuple , _lowerCamelCase : str ):
        """simple docstring"""
        A_ : List[str] = self.lang_code_to_id[tgt_lang]
        A_ : Optional[Any] = [lang_code_id]

    def _a ( self : Optional[Any] , _lowerCamelCase : str ):
        """simple docstring"""
        return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )

    def _a ( self : List[Any] , _lowerCamelCase : int ):
        """simple docstring"""
        return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] )

    def _a ( self : int , _lowerCamelCase : int ):
        """simple docstring"""
        return self.decoder.get(_lowerCamelCase , self.unk_token )

    def _a ( self : int , _lowerCamelCase : List[str] ):
        """simple docstring"""
        A_ : List[Any] = []
        A_ : Any = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                A_ : Optional[Any] = []
            else:
                current_sub_tokens.append(_lowerCamelCase )
        A_ : Tuple = self.sp_model.decode(_lowerCamelCase )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ):
        """simple docstring"""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]

    def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
        A_ : Tuple = [1] * len(self.prefix_tokens )
        A_ : Tuple = [1]
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
        return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones

    def _a ( self : Dict ):
        """simple docstring"""
        A_ : Union[str, Any] = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Union[str, Any] ):
        """simple docstring"""
        A_ : Dict = self.__dict__.copy()
        A_ : List[Any] = None
        return state

    def __setstate__( self : List[str] , _lowerCamelCase : Dict ):
        """simple docstring"""
        A_ : Dict = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            A_ : Optional[int] = {}
        A_ : int = load_spm(self.spm_file , self.sp_model_kwargs )

    def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
        """simple docstring"""
        A_ : Dict = Path(_lowerCamelCase )
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        A_ : Optional[int] = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        A_ : Optional[int] = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , _lowerCamelCase )
        if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , _lowerCamelCase )
        elif not os.path.isfile(self.spm_file ):
            with open(_lowerCamelCase , '''wb''' ) as fi:
                A_ : List[str] = self.sp_model.serialized_model_proto()
                fi.write(_lowerCamelCase )
        return (str(_lowerCamelCase ), str(_lowerCamelCase ))


def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ )
    spm.Load(str(lowerCamelCase__ ) )
    return spm


def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]:
    with open(lowerCamelCase__ , '''r''' ) as f:
        return json.load(lowerCamelCase__ )


def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> None:
    with open(lowerCamelCase__ , '''w''' ) as f:
        json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
```
`style_context_codestyle`: 4

`label`: 1
Row 2

`code`:

```python
'''simple docstring'''
import random
import unittest

import numpy as np

import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax


if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit

    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    snake_case__ = """0.12"""  # assumed parallelism: 8

if is_torch_available():
    import torch


def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[int]=None ) -> Optional[int]:
    if rng is None:
        A_ : Any = random.Random()
    A_ : Optional[Any] = 1
    for dim in shape:
        total_dims *= dim
    A_ : Tuple = []
    for _ in range(lowerCamelCase__ ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    A_ : str = np.array(lowerCamelCase__ , dtype=jnp.intaa ).reshape(lowerCamelCase__ )
    return output


def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : str=None ) -> int:
    A_ : Union[str, Any] = ids_tensor(lowerCamelCase__ , vocab_size=2 , rng=lowerCamelCase__ )
    # make sure that at least one token is attended to for each batch
    A_ : List[Any] = 1
    return attn_mask


@require_flax
class UpperCamelCase_ :
    """simple docstring"""

    _lowerCAmelCase = None
    _lowerCAmelCase = ()

    def _a ( self : Tuple ):
        """simple docstring"""
        A_ ,A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        A_ : Any = 2
        A_ : int = inputs['''input_ids'''].shape[-1] // 2
        A_ : Optional[int] = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        A_ : int = jnp.ones_like(_lowerCamelCase )
        A_ : Dict = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        A_ : Optional[Any] = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            A_ : Tuple = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def _a ( self : Tuple ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : Union[str, Any] = self._get_input_ids_and_config()
        A_ : List[Any] = False
        A_ : Union[str, Any] = max_length
        A_ : List[Any] = 0
        for model_class in self.all_generative_model_classes:
            A_ : Tuple = model_class(_lowerCamelCase )
            A_ : Any = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            A_ : Dict = getattr(_lowerCamelCase , _lowerCamelCase )
            A_ : str = pt_model_class(_lowerCamelCase ).eval()
            A_ : int = load_flax_weights_in_pytorch_model(_lowerCamelCase , flax_model.params )
            A_ : Union[str, Any] = flax_model.generate(_lowerCamelCase ).sequences
            A_ : Optional[int] = pt_model.generate(torch.tensor(_lowerCamelCase , dtype=torch.long ) )
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                A_ : List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )

    def _a ( self : Any ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : str = self._get_input_ids_and_config()
        A_ : Optional[int] = False
        A_ : List[Any] = max_length
        for model_class in self.all_generative_model_classes:
            A_ : List[Any] = model_class(_lowerCamelCase )
            A_ : str = model.generate(_lowerCamelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
            A_ : List[Any] = jit(model.generate )
            A_ : Optional[Any] = jit_generate(_lowerCamelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def _a ( self : Any ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : List[Any] = self._get_input_ids_and_config()
        A_ : str = True
        A_ : Union[str, Any] = max_length
        for model_class in self.all_generative_model_classes:
            A_ : Union[str, Any] = model_class(_lowerCamelCase )
            A_ : Optional[int] = model.generate(_lowerCamelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
            A_ : str = jit(model.generate )
            A_ : List[Any] = jit_generate(_lowerCamelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : Tuple = self._get_input_ids_and_config()
        A_ : int = False
        A_ : str = max_length
        A_ : Any = 2
        for model_class in self.all_generative_model_classes:
            A_ : Union[str, Any] = model_class(_lowerCamelCase )
            A_ : Dict = model.generate(_lowerCamelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
            A_ : int = jit(model.generate )
            A_ : Union[str, Any] = jit_generate(_lowerCamelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : Dict = self._get_input_ids_and_config()
        A_ : Optional[int] = False
        A_ : List[str] = max_length
        A_ : int = 2
        A_ : Union[str, Any] = 2
        for model_class in self.all_generative_model_classes:
            A_ : Any = model_class(_lowerCamelCase )
            A_ : str = model.generate(_lowerCamelCase ).sequences
            self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )

    def _a ( self : str ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : Optional[int] = self._get_input_ids_and_config()
        A_ : Union[str, Any] = True
        A_ : Optional[Any] = max_length
        A_ : Any = 0.8
        A_ : str = 10
        A_ : Dict = 0.3
        A_ : int = 1
        A_ : Union[str, Any] = 8
        A_ : str = 9
        for model_class in self.all_generative_model_classes:
            A_ : Union[str, Any] = model_class(_lowerCamelCase )
            A_ : Optional[Any] = model.generate(_lowerCamelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
            A_ : Any = jit(model.generate )
            A_ : int = jit_generate(_lowerCamelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def _a ( self : List[str] ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : str = self._get_input_ids_and_config()
        A_ : Optional[int] = max_length
        A_ : Tuple = 1
        A_ : List[str] = 8
        A_ : Optional[int] = 9
        for model_class in self.all_generative_model_classes:
            A_ : Union[str, Any] = model_class(_lowerCamelCase )
            A_ : str = model.generate(_lowerCamelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
            A_ : Optional[int] = jit(model.generate )
            A_ : Optional[int] = jit_generate(_lowerCamelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def _a ( self : int ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : List[Any] = self._get_input_ids_and_config()
        A_ : Tuple = max_length
        A_ : Dict = 2
        A_ : Dict = 1
        A_ : List[str] = 8
        A_ : int = 9
        for model_class in self.all_generative_model_classes:
            A_ : Tuple = model_class(_lowerCamelCase )
            A_ : int = model.generate(_lowerCamelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
            A_ : List[str] = jit(model.generate )
            A_ : int = jit_generate(_lowerCamelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def _a ( self : List[str] ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : str = self._get_input_ids_and_config()
        # pad attention mask on the left
        A_ : str = attention_mask.at[(0, 0)].set(0 )
        A_ : Optional[Any] = False
        A_ : List[Any] = max_length
        for model_class in self.all_generative_model_classes:
            A_ : List[str] = model_class(_lowerCamelCase )
            A_ : Any = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
            A_ : Optional[Any] = jit(model.generate )
            A_ : Tuple = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def _a ( self : int ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : int = self._get_input_ids_and_config()
        # pad attention mask on the left
        A_ : Dict = attention_mask.at[(0, 0)].set(0 )
        A_ : int = True
        A_ : str = max_length
        for model_class in self.all_generative_model_classes:
            A_ : List[Any] = model_class(_lowerCamelCase )
            A_ : Union[str, Any] = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
            A_ : Optional[Any] = jit(model.generate )
            A_ : List[str] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )

    def _a ( self : Any ):
        """simple docstring"""
        A_ ,A_ ,A_ ,A_ : Tuple = self._get_input_ids_and_config()
        # pad attention mask on the left
        A_ : str = attention_mask.at[(0, 0)].set(0 )
        A_ : Union[str, Any] = 2
        A_ : Union[str, Any] = max_length
        for model_class in self.all_generative_model_classes:
            A_ : List[Any] = model_class(_lowerCamelCase )
            A_ : Dict = model.generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] , _lowerCamelCase )
            A_ : List[Any] = jit(model.generate )
            A_ : Optional[int] = jit_generate(_lowerCamelCase , attention_mask=_lowerCamelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )


@require_flax
class UpperCamelCase_ (unittest.TestCase ):
    """simple docstring"""

    def _a ( self : Optional[int] ):
        """simple docstring"""
        A_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
        A_ : Union[str, Any] = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        A_ : Any = '''Hello world'''
        A_ : int = tokenizer(_lowerCamelCase , return_tensors='''np''' ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(_lowerCamelCase , '''do_samples''' ):
            model.generate(_lowerCamelCase , do_samples=_lowerCamelCase )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(_lowerCamelCase , '''foo''' ):
            A_ : Optional[int] = {'''foo''': '''bar'''}
            model.generate(_lowerCamelCase , **_lowerCamelCase )
```
`code_codestyle`: 4
`style_context`:

```python
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional

import yaml

from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool


# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
snake_case__ = sys.version_info >= (3, 10)


def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]:
    return field(default_factory=lambda: default , metadata=lowerCamelCase__ )


@dataclass
class UpperCamelCase_ :
    """simple docstring"""
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42


@dataclass
class UpperCamelCase_ :
    """simple docstring"""
    _lowerCAmelCase = 4_2
    _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )


@dataclass
class UpperCamelCase_ :
    """simple docstring"""
    _lowerCAmelCase = False
    _lowerCAmelCase = True
    _lowerCAmelCase = None


class UpperCamelCase_ (a__ ):
    """simple docstring"""
    _lowerCAmelCase = 'titi'
    _lowerCAmelCase = 'toto'


class UpperCamelCase_ (a__ ):
    """simple docstring"""
    _lowerCAmelCase = 'titi'
    _lowerCAmelCase = 'toto'
    _lowerCAmelCase = 4_2


@dataclass
class UpperCamelCase_ :
    """simple docstring"""
    _lowerCAmelCase = "toto"

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A_ : Optional[int] = BasicEnum(self.foo )


@dataclass
class UpperCamelCase_ :
    """simple docstring"""
    _lowerCAmelCase = "toto"

    def _a ( self : Tuple ):
        """simple docstring"""
        A_ : Optional[Any] = MixedTypeEnum(self.foo )


@dataclass
class UpperCamelCase_ :
    """simple docstring"""
    _lowerCAmelCase = None
    _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
    _lowerCAmelCase = None
    _lowerCAmelCase = list_field(default=[] )
    _lowerCAmelCase = list_field(default=[] )


@dataclass
class UpperCamelCase_ :
    """simple docstring"""
    _lowerCAmelCase = list_field(default=[] )
    _lowerCAmelCase = list_field(default=[1, 2, 3] )
    _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] )


@dataclass
class UpperCamelCase_ :
    """simple docstring"""
    _lowerCAmelCase = field()
    _lowerCAmelCase = field()
    _lowerCAmelCase = field()

    def _a ( self : Tuple ):
        """simple docstring"""
        A_ : Tuple = BasicEnum(self.required_enum )


@dataclass
class UpperCamelCase_ :
    """simple docstring"""
    _lowerCAmelCase = 42
    _lowerCAmelCase = field()
    _lowerCAmelCase = None
    _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} )
    _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )


if is_python_no_less_than_3_10:

    @dataclass
    class UpperCamelCase_ :
        """simple docstring"""
        _lowerCAmelCase = False
        _lowerCAmelCase = True
        _lowerCAmelCase = None

    @dataclass
    class UpperCamelCase_ :
        """simple docstring"""
        _lowerCAmelCase = None
        _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} )
        _lowerCAmelCase = None
        _lowerCAmelCase = list_field(default=[] )
        _lowerCAmelCase = list_field(default=[] )


class UpperCamelCase_ (unittest.TestCase ):
    """simple docstring"""

    def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ):
        """simple docstring"""
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            A_ : Union[str, Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
            A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) )
                del xx["type"], yy["type"]
            self.assertEqual(_lowerCamelCase , _lowerCamelCase )

    def _a ( self : Optional[int] ):
        """simple docstring"""
        A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
        A_ : Optional[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
        expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase )
        expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase )
        expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
        self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
        A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
        ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase )
        self.assertFalse(example.flag )

    def _a ( self : Dict ):
        """simple docstring"""
        A_ : int = HfArgumentParser(_lowerCamelCase )
        A_ : int = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase )
        expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
        self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )

    def _a ( self : Dict ):
        """simple docstring"""
        A_ : Any = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
        expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' )
        expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
        A_ : Dict = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(_lowerCamelCase )
        for dataclass_type in dataclass_types:
            A_ : Any = HfArgumentParser(_lowerCamelCase )
            self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
            A_ : List[Any] = parser.parse_args([] )
            self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
            A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] )
            self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
            A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] )
            self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
            A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
            self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )
            A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
            self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) )

    def _a ( self : List[Any] ):
        """simple docstring"""
        A_ : str = HfArgumentParser(_lowerCamelCase )
        A_ : Optional[int] = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
        A_ : str = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        A_ : int = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )
        A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )

    def _a ( self : Optional[int] ):
        """simple docstring"""

        @dataclass
        class UpperCamelCase_ :
            """simple docstring"""
            _lowerCAmelCase = "toto"

        A_ : List[str] = HfArgumentParser(_lowerCamelCase )
        A_ : Tuple = argparse.ArgumentParser()
        expected.add_argument(
            '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
        self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
        A_ : Tuple = parser.parse_args([] )
        self.assertEqual(args.foo , '''toto''' )
        A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] )
        self.assertEqual(args.foo , '''titi''' )
        A_ : int = parser.parse_args(['''--foo''', '''42'''] )
        self.assertEqual(args.foo , 42 )

    def _a ( self : Dict ):
        """simple docstring"""
        A_ : int = HfArgumentParser(_lowerCamelCase )
        A_ : List[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
        expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
        expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase )
        self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
        A_ : Optional[int] = parser.parse_args([] )
        self.assertEqual(
            _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
        A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
        self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )

    def _a ( self : Dict ):
        """simple docstring"""
        A_ : Optional[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase )
        expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' )
        expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase )
        expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
        expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase )
        A_ : Tuple = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(_lowerCamelCase )
        for dataclass_type in dataclass_types:
            A_ : int = HfArgumentParser(_lowerCamelCase )
            self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )
            A_ : List[Any] = parser.parse_args([] )
            self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) )
            A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
            self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )

    def _a ( self : List[Any] ):
        """simple docstring"""
        A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
        A_ : Dict = argparse.ArgumentParser()
        expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase )
        expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
        self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
        A_ : List[Any] = argparse.ArgumentParser()
        expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase )
        expected.add_argument(
            '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , )
        expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase )
        expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' )
        expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase )
        self.argparsersEqual(_lowerCamelCase , _lowerCamelCase )

    def _a ( self : Tuple ):
        """simple docstring"""
        A_ : List[Any] = HfArgumentParser(_lowerCamelCase )
        A_ : Union[str, Any] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0]
        A_ : str = BasicExample(**_lowerCamelCase )
        self.assertEqual(_lowerCamelCase , _lowerCamelCase )

    def _a ( self : List[str] ):
        """simple docstring"""
        A_ : Any = HfArgumentParser(_lowerCamelCase )
        A_ : List[str] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
            '''extra''': 42,
        }
        self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase )

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase )
        A_ : List[str] = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' )
            os.mkdir(_lowerCamelCase )
            with open(temp_local_path + '''.json''' , '''w+''' ) as f:
                json.dump(_lowerCamelCase , _lowerCamelCase )
            A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
            A_ : Optional[Any] = BasicExample(**_lowerCamelCase )
        self.assertEqual(_lowerCamelCase , _lowerCamelCase )

    def _a ( self : int ):
        """simple docstring"""
        A_ : int = HfArgumentParser(_lowerCamelCase )
        A_ : Tuple = {
            '''foo''': 12,
            '''bar''': 3.14,
            '''baz''': '''42''',
            '''flag''': True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            A_ : int = os.path.join(_lowerCamelCase , '''temp_yaml''' )
            os.mkdir(_lowerCamelCase )
            with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
                yaml.dump(_lowerCamelCase , _lowerCamelCase )
            A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
            A_ : int = BasicExample(**_lowerCamelCase )
        self.assertEqual(_lowerCamelCase , _lowerCamelCase )

    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        A_ : Dict = HfArgumentParser(_lowerCamelCase )
        self.assertIsNotNone(_lowerCamelCase )
```
`style_context_codestyle`: 4

`label`: 1
Row 3

`code`:

```python
'''simple docstring'''
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def snake_case__ ( lowerCamelCase__ : Namespace ) -> Union[str, Any]:
    return ConvertCommand(
        args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )


snake_case__ = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class UpperCamelCase_ (a__ ):
    """simple docstring"""

    @staticmethod
    def _a ( _lowerCamelCase : ArgumentParser ):
        """simple docstring"""
        A_ : List[str] = parser.add_parser(
            '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
        train_parser.add_argument('''--model_type''' , type=_lowerCamelCase , required=_lowerCamelCase , help='''Model\'s type.''' )
        train_parser.add_argument(
            '''--tf_checkpoint''' , type=_lowerCamelCase , required=_lowerCamelCase , help='''TensorFlow checkpoint path or folder.''' )
        train_parser.add_argument(
            '''--pytorch_dump_output''' , type=_lowerCamelCase , required=_lowerCamelCase , help='''Path to the PyTorch saved model output.''' )
        train_parser.add_argument('''--config''' , type=_lowerCamelCase , default='''''' , help='''Configuration file path or folder.''' )
        train_parser.add_argument(
            '''--finetuning_task_name''' , type=_lowerCamelCase , default=_lowerCamelCase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
        train_parser.set_defaults(func=_lowerCamelCase )

    def __init__( self : Dict , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : str , *_lowerCamelCase : Optional[Any] , ):
        """simple docstring"""
        A_ : Dict = logging.get_logger('''transformers-cli/converting''' )
        self._logger.info(f'Loading model {model_type}' )
        A_ : Tuple = model_type
        A_ : List[Any] = tf_checkpoint
        A_ : Any = pytorch_dump_output
        A_ : Union[str, Any] = config
        A_ : Dict = finetuning_task_name

    def _a ( self : Any ):
        """simple docstring"""
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_lowerCamelCase )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_lowerCamelCase )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_lowerCamelCase )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "t5":
            try:
                from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(_lowerCamelCase )
            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_lowerCamelCase )
            if "ckpt" in self._tf_checkpoint.lower():
                A_ : List[Any] = self._tf_checkpoint
                A_ : Optional[int] = ''''''
            else:
                A_ : Union[str, Any] = self._tf_checkpoint
                A_ : int = ''''''
            convert_transfo_xl_checkpoint_to_pytorch(
                _lowerCamelCase , self._config , self._pytorch_dump_output , _lowerCamelCase )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
                    convert_gpta_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_lowerCamelCase )
            convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_lowerCamelCase )
            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
        else:
            raise ValueError(
                '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
```
`code_codestyle`: 4
`style_context`:

```python
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / """utils"""))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


snake_case__ = get_tests_dir("""fixtures""")


class UpperCamelCase_ (unittest.TestCase ):
    """simple docstring"""

    def _a ( self : List[str] ):
        """simple docstring"""
        A_ : List[Any] = mock.Mock()
        A_ : List[str] = 500
        A_ : Tuple = {}
        A_ : int = HTTPError
        A_ : Optional[Any] = {}
        # Download this model to make sure it's in the cache.
        A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head:
            A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' )
            # This check we did call the fake head request
            mock_head.assert_called()

    def _a ( self : Tuple ):
        """simple docstring"""
        A_ : Tuple = ViTImageProcessor.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' )

    def _a ( self : Dict ):
        """simple docstring"""
        with self.assertRaises(_lowerCamelCase ):
            # config is in subfolder, the following should not work without specifying the subfolder
            A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' )
        A_ : Tuple = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' )
        self.assertIsNotNone(_lowerCamelCase )


@is_staging_test
class UpperCamelCase_ (unittest.TestCase ):
    """simple docstring"""

    @classmethod
    def _a ( cls : Tuple ):
        """simple docstring"""
        A_ : int = TOKEN
        HfFolder.save_token(_lowerCamelCase )

    @classmethod
    def _a ( cls : str ):
        """simple docstring"""
        try:
            delete_repo(token=cls._token , repo_id='''test-image-processor''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' )
        except HTTPError:
            pass

    def _a ( self : List[Any] ):
        """simple docstring"""
        A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase )
        image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token )
        A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
        A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase )
        image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token )
        A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token )
        A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )

    def _a ( self : Optional[Any] ):
        """simple docstring"""
        CustomImageProcessor.register_for_auto_class()
        A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase )
        image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , )
        A_ : str = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
```
`style_context_codestyle`: 4

`label`: 1
Row 4

`code`:

```python
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

snake_case__ = get_logger()

snake_case__ = None


class UpperCamelCase_ (TensorFormatter[Mapping, 'jax.Array', Mapping] ):
    """simple docstring"""

    def __init__( self : Tuple , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , **_lowerCamelCase : Dict ):
        """simple docstring"""
        super().__init__(features=_lowerCamelCase )
        import jax
        from jaxlib.xla_client import Device

        if isinstance(_lowerCamelCase , _lowerCamelCase ):
            raise ValueError(
                f'Expected {device} to be a `str` not {type(_lowerCamelCase )}, as `jaxlib.xla_extension.Device` '
                '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
                '''the device with `str()` to get its string identifier that will be internally mapped '''
                '''to the actual `jaxlib.xla_extension.Device`.''' )
        A_ : List[str] = device if isinstance(_lowerCamelCase , _lowerCamelCase ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            A_ : Dict = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f'Device with string identifier {self.device} not listed among the available '
                f'devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '
                f'device: {str(jax.devices()[0] )}.' )
            A_ : int = str(jax.devices()[0] )
        A_ : Tuple = jnp_array_kwargs

    @staticmethod
    def _a ( ):
        """simple docstring"""
        import jax

        return {str(_lowerCamelCase ): device for device in jax.devices()}

    def _a ( self : int , _lowerCamelCase : List[str] ):
        """simple docstring"""
        import jax
        import jax.numpy as jnp

        if isinstance(_lowerCamelCase , _lowerCamelCase ) and column:
            if all(
                isinstance(_lowerCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(_lowerCamelCase , axis=0 )
        return column

    def _a ( self : List[str] , _lowerCamelCase : Dict ):
        """simple docstring"""
        import jax
        import jax.numpy as jnp

        if isinstance(_lowerCamelCase , (str, bytes, type(_lowerCamelCase )) ):
            return value
        elif isinstance(_lowerCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        A_ : Optional[int] = {}
        if isinstance(_lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_xaa:
                A_ : str = {'''dtype''': jnp.intaa}
            else:
                A_ : int = {'''dtype''': jnp.intaa}
        elif isinstance(_lowerCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            A_ : Optional[int] = {'''dtype''': jnp.floataa}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(_lowerCamelCase , PIL.Image.Image ):
                A_ : List[Any] = np.asarray(_lowerCamelCase )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            A_ : Tuple = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(_lowerCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )

    def _a ( self : List[Any] , _lowerCamelCase : List[str] ):
        """simple docstring"""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(_lowerCamelCase , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(_lowerCamelCase , '''__array__''' ) and not isinstance(_lowerCamelCase , jax.Array ):
            A_ : Dict = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(_lowerCamelCase , np.ndarray ):
            if data_struct.dtype == object:
                # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(_lowerCamelCase ) for substruct in data_struct] )
        elif isinstance(_lowerCamelCase , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(_lowerCamelCase ) for substruct in data_struct] )
        return self._tensorize(_lowerCamelCase )

    def _a ( self : Union[str, Any] , _lowerCamelCase : dict ):
        """simple docstring"""
        return map_nested(self._recursive_tensorize , _lowerCamelCase , map_list=_lowerCamelCase )

    def _a ( self : str , _lowerCamelCase : pa.Table ):
        """simple docstring"""
        A_ : str = self.numpy_arrow_extractor().extract_row(_lowerCamelCase )
        A_ : Optional[int] = self.python_features_decoder.decode_row(_lowerCamelCase )
        return self.recursive_tensorize(_lowerCamelCase )

    def _a ( self : int , _lowerCamelCase : pa.Table ):
        """simple docstring"""
        A_ : List[str] = self.numpy_arrow_extractor().extract_column(_lowerCamelCase )
        A_ : Tuple = self.python_features_decoder.decode_column(_lowerCamelCase , pa_table.column_names[0] )
        A_ : Tuple = self.recursive_tensorize(_lowerCamelCase )
        A_ : List[Any] = self._consolidate(_lowerCamelCase )
        return column

    def _a ( self : int , _lowerCamelCase : pa.Table ):
        """simple docstring"""
        A_ : Optional[int] = self.numpy_arrow_extractor().extract_batch(_lowerCamelCase )
        A_ : Optional[Any] = self.python_features_decoder.decode_batch(_lowerCamelCase )
        A_ : str = self.recursive_tensorize(_lowerCamelCase )
        for column_name in batch:
            A_ : Union[str, Any] = self._consolidate(batch[column_name] )
        return batch
```
`code_codestyle`: 4
`style_context`:

```python
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class UpperCamelCase_ (a__ ):
    """simple docstring"""

    _lowerCAmelCase = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined'
    _lowerCAmelCase = 'image_segmenter'
    _lowerCAmelCase = CLIPSegForImageSegmentation
    _lowerCAmelCase = ['image', 'text']
    _lowerCAmelCase = ['image']

    def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ):
        """simple docstring"""
        requires_backends(self , ['''vision'''] )
        super().__init__(*_lowerCamelCase , **_lowerCamelCase )

    def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ):
        """simple docstring"""
        return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' )

    def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ):
        """simple docstring"""
        with torch.no_grad():
            A_ : Optional[int] = self.model(**_lowerCamelCase ).logits
        return logits

    def _a ( self : List[str] , _lowerCamelCase : Optional[int] ):
        """simple docstring"""
        A_ : int = outputs.cpu().detach().numpy()
        A_ : Tuple = 0
        A_ : List[str] = 1
        return Image.fromarray((array * 255).astype(np.uinta ) )
```
`style_context_codestyle`: 4

`label`: 1
Row 5

`code`:

```python
'''simple docstring'''
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
snake_case__ = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
snake_case__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
snake_case__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))


def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : str ) -> tuple[str, float]:
    A_ : Union[str, Any] = len([g for position, g in enumerate(lowerCamelCase__ ) if g == main_target[position]] )
    return (item, float(lowerCamelCase__ ))


def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : str ) -> tuple[str, str]:
    A_ : List[Any] = random.randint(0 , len(lowerCamelCase__ ) - 1 )
    A_ : List[str] = parent_a[:random_slice] + parent_a[random_slice:]
    A_ : Any = parent_a[:random_slice] + parent_a[random_slice:]
    return (child_a, child_a)


def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : list[str] ) -> str:
    A_ : Union[str, Any] = list(lowerCamelCase__ )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        A_ : Union[str, Any] = random.choice(lowerCamelCase__ )
    return "".join(lowerCamelCase__ )


def snake_case__ ( lowerCamelCase__ : tuple[str, float] , lowerCamelCase__ : list[tuple[str, float]] , lowerCamelCase__ : list[str] , ) -> list[str]:
    A_ : List[str] = []
    # Generate more children proportionally to the fitness score.
    A_ : Optional[Any] = int(parent_a[1] * 1_0_0 ) + 1
    A_ : str = 1_0 if child_n >= 1_0 else child_n
    for _ in range(lowerCamelCase__ ):
        A_ : int = population_score[random.randint(0 , lowerCamelCase__ )][0]
        A_ ,A_ : Optional[int] = crossover(parent_a[0] , lowerCamelCase__ )
        # Append new string to the population list.
        pop.append(mutate(lowerCamelCase__ , lowerCamelCase__ ) )
        pop.append(mutate(lowerCamelCase__ , lowerCamelCase__ ) )
    return pop


def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : list[str] , lowerCamelCase__ : bool = True ) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        A_ : Optional[Any] = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(lowerCamelCase__ )
    # Verify that the target contains no genes besides the ones inside genes variable.
    A_ : Optional[Any] = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        A_ : int = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(lowerCamelCase__ )
    # Generate random starting population.
    A_ : List[str] = []
    for _ in range(lowerCamelCase__ ):
        population.append(''''''.join([random.choice(lowerCamelCase__ ) for i in range(len(lowerCamelCase__ ) )] ) )
    # Just some logs to know what the algorithms is doing.
    A_ ,A_ : List[Any] = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(lowerCamelCase__ )
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        A_ : Optional[int] = [evaluate(lowerCamelCase__ , lowerCamelCase__ ) for item in population]
        # Check if there is a matching evolution.
        A_ : Optional[Any] = sorted(lowerCamelCase__ , key=lambda lowerCamelCase__ : x[1] , reverse=lowerCamelCase__ )
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 1_0 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}' )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        A_ : Optional[Any] = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(lowerCamelCase__ )
        # Normalize population score to be between 0 and 1.
        A_ : Any = [
            (item, score / len(lowerCamelCase__ )) for item, score in population_score
        ]
        # This is selection
        for i in range(lowerCamelCase__ ):
            population.extend(select(population_score[int(lowerCamelCase__ )] , lowerCamelCase__ , lowerCamelCase__ ) )
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(lowerCamelCase__ ) > N_POPULATION:
                break


if __name__ == "__main__":
    snake_case__ = (
        """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
    )
    snake_case__ = list(
        """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
        """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
    )
    snake_case__ , snake_case__ , snake_case__ = basic(target_str, genes_list)
    print(
        F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
```
4
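The `crossover` helper in the genetic-algorithm row above is a single-point crossover, but the obfuscated variable names collapse the two parents into one and hide the swap. A minimal self-contained sketch of the intended operation, under that reading (the helper name and printed children here are illustrative, not from the source):

import random

random.seed(0)


def single_point_crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Cut both parents at one random index and swap the tails.
    cut = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:cut] + parent_2[cut:]
    child_2 = parent_2[:cut] + parent_1[cut:]
    return (child_1, child_2)


print(single_point_crossover("AAAAAA", "BBBBBB"))  # e.g. ('AAAABB', 'BBBBAA')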
'''simple docstring'''
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum over all contiguous subarrays of ``arr``."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Kadane's algorithm: either extend the running subarray or restart it.
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
4
1
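A quick sanity check of the Kadane implementation above; the `max_subarray_sum` name and `allow_empty_subarrays` flag follow the cleaned-up version of that row:

# Usage sketch for the reconstructed max_subarray_sum above.
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # subarray [4, -1, 2, 1]
assert max_subarray_sum([-3, -1, -2]) == -1  # best non-empty subarray is [-1]
assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0  # empty subarray wins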
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
4
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
4
1
'''simple docstring'''
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Find indices i < j with nums[i] + nums[j] == target in a sorted list."""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
4
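Note that the two-pointer search above assumes `nums` is sorted in ascending order; on unsorted input it can silently miss valid pairs. Usage of the reconstructed function:

# Usage sketch for the reconstructed two_pointer above; input must be sorted.
print(two_pointer([2, 7, 11, 15], 9))   # [0, 1] because 2 + 7 == 9
print(two_pointer([2, 7, 11, 15], 20))  # [] -- no pair sums to 20
print(two_pointer(sorted([15, 2, 11, 7]), 18))  # [1, 2] -- indices into the sorted list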
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'table-transformer' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : str = backbone_config.get('''model_type''' ) A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] A_ : List[str] = config_class.from_dict(_lowerCamelCase ) # set timm attributes to None A_ ,A_ ,A_ : Union[str, Any] = None, None, None A_ : Optional[Any] = use_timm_backbone A_ : Optional[int] = backbone_config A_ : Optional[Any] = num_channels A_ : Dict = num_queries A_ : str = d_model A_ : List[str] = encoder_ffn_dim A_ : int = encoder_layers A_ : Optional[Any] = encoder_attention_heads A_ : List[str] = decoder_ffn_dim A_ : Any = decoder_layers A_ : List[str] = decoder_attention_heads A_ : Tuple = dropout A_ : Optional[Any] = attention_dropout A_ : Any = activation_dropout A_ : List[Any] = activation_function A_ : Dict = init_std A_ : Any = init_xavier_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : Any = encoder_layers A_ : List[str] = auxiliary_loss A_ : List[Any] = position_embedding_type A_ : Optional[Any] = backbone A_ : Tuple = use_pretrained_backbone A_ : List[Any] = dilation # Hungarian matcher A_ : List[str] = class_cost A_ : str = bbox_cost A_ : Union[str, Any] = giou_cost # Loss coefficients A_ : Any = mask_loss_coefficient A_ : Optional[int] = dice_loss_coefficient A_ : Dict = bbox_loss_coefficient A_ : int = giou_loss_coefficient A_ : int = eos_coefficient super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def _a ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def _a ( self : Any ): """simple docstring""" return self.d_model class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Tuple ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-5 @property def _a ( self : str ): """simple docstring""" return 12
4
1
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) snake_case__ = { """sample_size""": 32, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 10_00, """block_out_channels""": [32, 64], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } snake_case__ = { """sample_size""": 64, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 10_00, """block_out_channels""": [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } snake_case__ = { """sample_size""": 2_56, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } snake_case__ = { """num_train_timesteps""": 40, """sigma_min""": 0.0_0_2, """sigma_max""": 8_0.0, } snake_case__ = { """num_train_timesteps""": 2_01, """sigma_min""": 0.0_0_2, """sigma_max""": 8_0.0, } snake_case__ = { """num_train_timesteps""": 1_51, """sigma_min""": 0.0_0_2, """sigma_max""": 8_0.0, } def snake_case__ ( lowerCamelCase__ : Any ) -> str: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict=False ) -> Any: A_ : Optional[Any] = checkpoint[f'{old_prefix}.in_layers.0.weight'] A_ : List[Any] = checkpoint[f'{old_prefix}.in_layers.0.bias'] A_ : Optional[int] = checkpoint[f'{old_prefix}.in_layers.2.weight'] A_ : Optional[Any] = checkpoint[f'{old_prefix}.in_layers.2.bias'] A_ : List[Any] = checkpoint[f'{old_prefix}.emb_layers.1.weight'] A_ : Optional[Any] = checkpoint[f'{old_prefix}.emb_layers.1.bias'] A_ : str = checkpoint[f'{old_prefix}.out_layers.0.weight'] A_ : str = checkpoint[f'{old_prefix}.out_layers.0.bias'] A_ : Optional[Any] = checkpoint[f'{old_prefix}.out_layers.3.weight'] A_ : Any = checkpoint[f'{old_prefix}.out_layers.3.bias'] if has_skip: A_ : str = checkpoint[f'{old_prefix}.skip_connection.weight'] A_ : Tuple = 
checkpoint[f'{old_prefix}.skip_connection.bias'] return new_checkpoint def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any]=None ) -> Union[str, Any]: A_ ,A_ ,A_ : Any = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3 , dim=0 ) A_ ,A_ ,A_ : str = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3 , dim=0 ) A_ : Union[str, Any] = checkpoint[f'{old_prefix}.norm.weight'] A_ : Tuple = checkpoint[f'{old_prefix}.norm.bias'] A_ : Optional[Any] = weight_q.squeeze(-1 ).squeeze(-1 ) A_ : Tuple = bias_q.squeeze(-1 ).squeeze(-1 ) A_ : Any = weight_k.squeeze(-1 ).squeeze(-1 ) A_ : Any = bias_k.squeeze(-1 ).squeeze(-1 ) A_ : Tuple = weight_v.squeeze(-1 ).squeeze(-1 ) A_ : Optional[int] = bias_v.squeeze(-1 ).squeeze(-1 ) A_ : int = ( checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 ) ) A_ : List[Any] = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any] ) -> str: A_ : List[Any] = torch.load(lowerCamelCase__ , map_location='''cpu''' ) A_ : Optional[Any] = {} A_ : str = checkpoint['''time_embed.0.weight'''] A_ : Optional[Any] = checkpoint['''time_embed.0.bias'''] A_ : Optional[int] = checkpoint['''time_embed.2.weight'''] A_ : Union[str, Any] = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: A_ : str = checkpoint['''label_emb.weight'''] A_ : Optional[Any] = checkpoint['''input_blocks.0.0.weight'''] A_ : Tuple = checkpoint['''input_blocks.0.0.bias'''] A_ : Dict = unet_config['''down_block_types'''] A_ : Tuple = unet_config['''layers_per_block'''] A_ : Optional[int] = unet_config['''attention_head_dim'''] A_ : Any = unet_config['''block_out_channels'''] A_ : Optional[int] = 1 A_ : str = channels_list[0] for i, layer_type in enumerate(lowerCamelCase__ ): A_ : int = channels_list[i] A_ : List[Any] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(lowerCamelCase__ ): A_ : Union[str, Any] = f'down_blocks.{i}.resnets.{j}' A_ : Dict = f'input_blocks.{current_layer}.0' A_ : Optional[Any] = True if j == 0 and downsample_block_has_skip else False A_ : Optional[Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(lowerCamelCase__ ): A_ : Optional[Any] = f'down_blocks.{i}.resnets.{j}' A_ : List[str] = f'input_blocks.{current_layer}.0' A_ : Union[str, Any] = True if j == 0 and downsample_block_has_skip else False A_ : List[str] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) A_ : List[str] = f'down_blocks.{i}.attentions.{j}' A_ : Optional[Any] = f'input_blocks.{current_layer}.1' A_ : List[Any] = convert_attention( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) current_layer += 1 if i != len(lowerCamelCase__ ) - 1: A_ : str = f'down_blocks.{i}.downsamplers.0' A_ : List[str] = f'input_blocks.{current_layer}.0' A_ : List[str] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) current_layer += 1 A_ : List[str] = current_channels # hardcoded the mid-block for now A_ : Dict = '''mid_block.resnets.0''' A_ : Dict = '''middle_block.0''' A_ : List[Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 
lowerCamelCase__ ) A_ : List[Any] = '''mid_block.attentions.0''' A_ : Dict = '''middle_block.1''' A_ : Any = convert_attention(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = '''mid_block.resnets.1''' A_ : Optional[Any] = '''middle_block.2''' A_ : int = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : Dict = 0 A_ : str = unet_config['''up_block_types'''] for i, layer_type in enumerate(lowerCamelCase__ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): A_ : Any = f'up_blocks.{i}.resnets.{j}' A_ : Dict = f'output_blocks.{current_layer}.0' A_ : Optional[int] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) current_layer += 1 if i != len(lowerCamelCase__ ) - 1: A_ : Dict = f'up_blocks.{i}.upsamplers.0' A_ : List[Any] = f'output_blocks.{current_layer-1}.1' A_ : Optional[Any] = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): A_ : List[str] = f'up_blocks.{i}.resnets.{j}' A_ : Optional[int] = f'output_blocks.{current_layer}.0' A_ : Dict = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , has_skip=lowerCamelCase__ ) A_ : Tuple = f'up_blocks.{i}.attentions.{j}' A_ : Dict = f'output_blocks.{current_layer}.1' A_ : List[str] = convert_attention( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) current_layer += 1 if i != len(lowerCamelCase__ ) - 1: A_ : str = f'up_blocks.{i}.upsamplers.0' A_ : Optional[int] = f'output_blocks.{current_layer-1}.2' A_ : int = convert_resnet(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = checkpoint['''out.0.weight'''] A_ : Tuple = checkpoint['''out.0.bias'''] A_ : Optional[Any] = checkpoint['''out.2.weight'''] A_ : List[Any] = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") snake_case__ = parser.parse_args() snake_case__ = strabool(args.class_cond) snake_case__ = os.path.basename(args.unet_path) print(F'Checkpoint: {ckpt_name}') # Get U-Net config if "imagenet64" in ckpt_name: snake_case__ = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): snake_case__ = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: snake_case__ = TEST_UNET_CONFIG else: raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.') if not args.class_cond: snake_case__ = None snake_case__ = con_pt_to_diffuser(args.unet_path, unet_config) snake_case__ = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: snake_case__ = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: snake_case__ = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): snake_case__ = 
CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.') snake_case__ = CMStochasticIterativeScheduler(**scheduler_config) snake_case__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
4
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple 
docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : 
Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase 
) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
4
1
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) set_seed(7_70) snake_case__ = { """c_attn""": """att_proj""", """c_proj""": """out_proj""", """c_fc""": """in_proj""", """transformer.""": """""", """h.""": """layers.""", """ln_1""": """layernorm_1""", """ln_2""": """layernorm_2""", """ln_f""": """layernorm_final""", """wpe""": """position_embeds_layer""", """wte""": """input_embeds_layer""", } snake_case__ = { """text_small""": { """repo_id""": """suno/bark""", """file_name""": """text.pt""", }, """coarse_small""": { """repo_id""": """suno/bark""", """file_name""": """coarse.pt""", }, """fine_small""": { """repo_id""": """suno/bark""", """file_name""": """fine.pt""", }, """text""": { """repo_id""": """suno/bark""", """file_name""": """text_2.pt""", }, """coarse""": { """repo_id""": """suno/bark""", """file_name""": """coarse_2.pt""", }, """fine""": { """repo_id""": """suno/bark""", """file_name""": """fine_2.pt""", }, } snake_case__ = os.path.dirname(os.path.abspath(__file__)) snake_case__ = os.path.join(os.path.expanduser("""~"""), """.cache""") snake_case__ = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""") def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=False ) -> List[str]: A_ : Dict = model_type if use_small: key += "_small" return os.path.join(lowerCamelCase__ , REMOTE_MODEL_PATHS[key]['''file_name'''] ) def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : str ) -> Union[str, Any]: os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) hf_hub_download(repo_id=lowerCamelCase__ , filename=lowerCamelCase__ , local_dir=lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Union[str, Any]="text" ) -> int: if model_type == "text": A_ : int = BarkSemanticModel A_ : List[Any] = BarkSemanticConfig A_ : int = BarkSemanticGenerationConfig elif model_type == "coarse": A_ : Optional[int] = BarkCoarseModel A_ : Optional[int] = BarkCoarseConfig A_ : int = BarkCoarseGenerationConfig elif model_type == "fine": A_ : Optional[Any] = BarkFineModel A_ : Tuple = BarkFineConfig A_ : Tuple = BarkFineGenerationConfig else: raise NotImplementedError() A_ : int = f'{model_type}_small' if use_small else model_type A_ : str = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(lowerCamelCase__ ): logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' 
) _download(model_info['''repo_id'''] , model_info['''file_name'''] ) A_ : Optional[Any] = torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ ) # this is a hack A_ : Optional[int] = checkpoint['''model_args'''] if "input_vocab_size" not in model_args: A_ : int = model_args['''vocab_size'''] A_ : Union[str, Any] = model_args['''vocab_size'''] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments A_ : Any = model_args.pop('''n_head''' ) A_ : str = model_args.pop('''n_embd''' ) A_ : Optional[Any] = model_args.pop('''n_layer''' ) A_ : Optional[int] = ConfigClass(**checkpoint['''model_args'''] ) A_ : Union[str, Any] = ModelClass(config=lowerCamelCase__ ) A_ : int = GenerationConfigClass() A_ : Dict = model_generation_config A_ : List[str] = checkpoint['''model'''] # fixup checkpoint A_ : List[str] = '''_orig_mod.''' for k, v in list(state_dict.items() ): if k.startswith(lowerCamelCase__ ): # replace part of the key with corresponding layer name in HF implementation A_ : Optional[int] = k[len(lowerCamelCase__ ) :] for old_layer_name in new_layer_name_dict: A_ : int = new_k.replace(lowerCamelCase__ , new_layer_name_dict[old_layer_name] ) A_ : Any = state_dict.pop(lowerCamelCase__ ) A_ : Any = set(state_dict.keys() ) - set(model.state_dict().keys() ) A_ : List[Any] = {k for k in extra_keys if not k.endswith('''.attn.bias''' )} A_ : Tuple = set(model.state_dict().keys() ) - set(state_dict.keys() ) A_ : Tuple = {k for k in missing_keys if not k.endswith('''.attn.bias''' )} if len(lowerCamelCase__ ) != 0: raise ValueError(f'extra keys found: {extra_keys}' ) if len(lowerCamelCase__ ) != 0: raise ValueError(f'missing keys: {missing_keys}' ) model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ ) A_ : Optional[int] = model.num_parameters(exclude_embeddings=lowerCamelCase__ ) A_ : Any = checkpoint['''best_val_loss'''].item() logger.info(f'model loaded: {round(n_params/1e6 , 1 )}M params, {round(lowerCamelCase__ , 3 )} loss' ) model.eval() model.to(lowerCamelCase__ ) del checkpoint, state_dict return model def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[str]="text" ) -> Optional[Any]: if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() A_ : Any = '''cpu''' # do conversion on cpu A_ : Dict = _get_ckpt_path(lowerCamelCase__ , use_small=lowerCamelCase__ ) A_ : Dict = _load_model(lowerCamelCase__ , lowerCamelCase__ , model_type=lowerCamelCase__ , use_small=lowerCamelCase__ ) # load bark initial model A_ : Tuple = _bark_load_model(lowerCamelCase__ , '''cpu''' , model_type=lowerCamelCase__ , use_small=lowerCamelCase__ ) if model_type == "text": A_ : Any = bark_model['''model'''] if model.num_parameters(exclude_embeddings=lowerCamelCase__ ) != bark_model.get_num_params(): raise ValueError('''initial and new models don\'t have the same number of parameters''' ) # check if same output as the bark model A_ : Dict = 5 A_ : List[str] = 1_0 if model_type in ["text", "coarse"]: A_ : Union[str, Any] = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int ) A_ : List[Any] = bark_model(lowerCamelCase__ )[0] A_ : Union[str, Any] = model(lowerCamelCase__ ) # take last logits A_ : Union[str, Any] = output_new_model_total.logits[:, [-1], :] else: A_ : Union[str, Any] = 3 A_ : Tuple = 8 A_ : Union[str, Any] = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) A_ : Any = model(lowerCamelCase__ , lowerCamelCase__ ) A_ : Dict = 
bark_model(lowerCamelCase__ , lowerCamelCase__ ) A_ : int = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('''initial and new outputs don\'t have the same shape''' ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError('''initial and new outputs are not equal''' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , ) -> Optional[Any]: A_ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[int] = BarkSemanticConfig.from_pretrained(os.path.join(lowerCamelCase__ , '''config.json''' ) ) A_ : Any = BarkCoarseConfig.from_pretrained(os.path.join(lowerCamelCase__ , '''config.json''' ) ) A_ : Any = BarkFineConfig.from_pretrained(os.path.join(lowerCamelCase__ , '''config.json''' ) ) A_ : Optional[Any] = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' ) A_ : Optional[Any] = BarkSemanticModel.from_pretrained(lowerCamelCase__ ) A_ : int = BarkCoarseModel.from_pretrained(lowerCamelCase__ ) A_ : Union[str, Any] = BarkFineModel.from_pretrained(lowerCamelCase__ ) A_ : str = EncodecModel.from_pretrained('''facebook/encodec_24khz''' ) A_ : Union[str, Any] = BarkConfig.from_sub_model_configs( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : List[Any] = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) A_ : int = BarkModel(lowerCamelCase__ ) A_ : List[str] = semantic A_ : List[str] = coarseAcoustic A_ : Dict = fineAcoustic A_ : List[Any] = codec A_ : Union[str, Any] = bark_generation_config Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) bark.save_pretrained(lowerCamelCase__ , repo_id=lowerCamelCase__ , push_to_hub=lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() # Required parameters parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""") snake_case__ = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
4
'''simple docstring'''
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
4
1
'''simple docstring'''


def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the insertion point for val in the sorted prefix collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements one slot to the right and insert val at index low.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
4
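The binary search only cuts the comparisons per insertion to O(log i); the element shifts keep the sort O(n^2) overall. A non-interactive usage of the reconstructed function:

# Usage sketch for the reconstructed binary_insertion_sort above.
print(binary_insertion_sort([5, 2, 4, 6, 1, 3]))  # [1, 2, 3, 4, 5, 6]
print(binary_insertion_sort([]))  # [] -- empty input is returned unchanged
# Stability: on ties, `low` moves past equal elements, so equal keys keep their order.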
'''simple docstring'''
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int):
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # Shift the delay lines and store the newest input/output samples.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
4
1
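To see the filter's difference equation y[n] = (b_0 x[n] + sum_i b_i x[n-i] - sum_i a_i y[n-i]) / a_0 in action, here is a first-order instance that degenerates to a two-tap moving average; the class and method names follow the reconstruction above:

# y[n] = 0.5 * x[n] + 0.5 * x[n-1], i.e. a = [1.0, 0.0], b = [0.5, 0.5]
filt = IIRFilter(order=1)
filt.set_coefficients(a_coeffs=[1.0, 0.0], b_coeffs=[0.5, 0.5])
samples = [0.0, 1.0, 1.0, 1.0]
print([filt.process(s) for s in samples])  # [0.0, 0.5, 1.0, 1.0]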
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: snake_case__ = None snake_case__ = logging.get_logger(__name__) snake_case__ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} snake_case__ = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } snake_case__ = { """facebook/nllb-large-en-ro""": 10_24, """facebook/nllb-200-distilled-600M""": 10_24, } # fmt: off snake_case__ = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", 
"""sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = NllbTokenizer _lowerCAmelCase = [] _lowerCAmelCase = [] def __init__( self : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : str="<s>" , _lowerCamelCase : Optional[Any]="</s>" , _lowerCamelCase : str="</s>" , _lowerCamelCase : Dict="<s>" , _lowerCamelCase : Optional[int]="<unk>" , _lowerCamelCase : Tuple="<pad>" , _lowerCamelCase : List[str]="<mask>" , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Union[str, Any]=False , **_lowerCamelCase : Tuple , ): """simple docstring""" A_ : Dict = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token A_ : Union[str, Any] = legacy_behaviour super().__init__( vocab_file=_lowerCamelCase , tokenizer_file=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , legacy_behaviour=_lowerCamelCase , **_lowerCamelCase , ) A_ : str = vocab_file A_ : int = False if not self.vocab_file else True A_ : Any = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) A_ : str = { lang_code: self.convert_tokens_to_ids(_lowerCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } A_ : Any = src_lang if src_lang is not None else '''eng_Latn''' A_ : Tuple = self.convert_tokens_to_ids(self._src_lang ) A_ : Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _a ( self : List[str] ): """simple docstring""" return self._src_lang @src_lang.setter def _a ( self : int , _lowerCamelCase : str ): """simple docstring""" A_ : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _a ( self : int , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _a ( self : Optional[int] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" A_ : Dict = [self.sep_token_id] A_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] , _lowerCamelCase : Optional[str] , **_lowerCamelCase : str ): """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) A_ : List[Any] = src_lang A_ : List[Any] = self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ) A_ : str = self.convert_tokens_to_ids(_lowerCamelCase ) A_ : Tuple = tgt_lang_id return inputs def _a ( self : Any , _lowerCamelCase : List[str] , _lowerCamelCase : str = "eng_Latn" , _lowerCamelCase : Optional[List[str]] = None , _lowerCamelCase : str = "fra_Latn" , **_lowerCamelCase : List[Any] , ): """simple docstring""" A_ : Dict = src_lang A_ : Any = tgt_lang return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def _a ( self : Optional[int] ): """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _a ( self : Any , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Optional[int] = self.convert_tokens_to_ids(_lowerCamelCase ) if self.legacy_behaviour: A_ : Tuple = [] A_ : int = [self.eos_token_id, self.cur_lang_code] else: A_ : Any = [self.cur_lang_code] A_ : str = [self.eos_token_id] A_ : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) A_ : str = self.convert_ids_to_tokens(self.suffix_tokens ) A_ : Tuple = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _a ( self : Union[str, Any] , _lowerCamelCase : str ): """simple docstring""" A_ : str = self.convert_tokens_to_ids(_lowerCamelCase ) if self.legacy_behaviour: A_ : Tuple = [] A_ : Optional[Any] = 
[self.eos_token_id, self.cur_lang_code] else: A_ : Dict = [self.cur_lang_code] A_ : Tuple = [self.eos_token_id] A_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) A_ : int = self.convert_ids_to_tokens(self.suffix_tokens ) A_ : Union[str, Any] = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _a ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(_lowerCamelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory.' ) return A_ : Any = os.path.join( _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ): copyfile(self.vocab_file , _lowerCamelCase ) return (out_vocab_file,)
4
'''simple docstring'''


class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a binary search tree from the input values.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse the BST in order to read the values back sorted.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
1
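Tree sort inherits BST behavior: O(n log n) on average, but already-sorted input degenerates the tree into a linked list and the sort into O(n^2) time with deep recursion. Usage of the reconstructed functions, including one caveat visible in the insert logic:

print(tree_sort([10, 1, 3, 2, 9, 14, 13]))  # [1, 2, 3, 9, 10, 13, 14]
# Caveat: duplicates are dropped, because insert's `else` branch just
# re-assigns the same value when val == self.val.
print(tree_sort([3, 1, 3, 2]))  # [1, 2, 3] -- the duplicate 3 disappears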
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files

FILE_CONTENT = """\
Text data.
Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename


DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)

    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
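A minimal sketch of how a test could consume the session-scoped fixtures above. The test name, the pandas dependency, and the assertions are illustrative assumptions, not part of the original suite; pytest injects csv_path and zip_csv_path by matching argument names to fixture names.

import zipfile

import pandas as pd


def test_csv_fixtures_round_trip(csv_path, zip_csv_path):
    # dataset.csv holds the four rows of DATA plus a header row
    df = pd.read_csv(csv_path)
    assert list(df.columns) == ["col_1", "col_2", "col_3"]
    assert len(df) == 4
    # the zip fixture bundles both CSV files at the archive root
    with zipfile.ZipFile(zip_csv_path) as archive:
        assert sorted(archive.namelist()) == ["dataset.csv", "dataset2.csv"]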
def heaps(arr: list) -> list:
    """Return all permutations of the input list, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
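As a quick sanity check (not in the original file), heaps can be compared against the standard library, which must produce the same set of tuples:

from itertools import permutations

# Heap's algorithm visits every permutation exactly once, so the sorted
# outputs of both generators coincide (3! = 6 tuples for a 3-element input).
assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))
assert len(heaps([1, 2, 3])) == 6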
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case__ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = XLMRobertaTokenizer _lowerCAmelCase = XLMRobertaTokenizerFast _lowerCAmelCase = True _lowerCAmelCase = True def _a ( self : str ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing A_ : int = XLMRobertaTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = '''<pad>''' A_ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def _a ( self : Any ): """simple docstring""" A_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(_lowerCamelCase ) , 1002 ) def _a ( self : List[Any] ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1002 ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = XLMRobertaTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) A_ : Dict = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) A_ : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) A_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) A_ : str = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def _a ( self : Optional[Any] ): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return A_ : Dict = 
(self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): A_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) A_ : List[str] = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) A_ : Optional[int] = tempfile.mkdtemp() A_ : List[Any] = tokenizer_r.save_pretrained(_lowerCamelCase ) A_ : List[Any] = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) A_ : List[str] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(_lowerCamelCase , _lowerCamelCase ) # Checks everything loads correctly in the same way A_ : Union[str, Any] = tokenizer_r.from_pretrained(_lowerCamelCase ) A_ : List[Any] = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(_lowerCamelCase ) # Save tokenizer rust, legacy_format=True A_ : Optional[Any] = tempfile.mkdtemp() A_ : Tuple = tokenizer_r.save_pretrained(_lowerCamelCase , legacy_format=_lowerCamelCase ) A_ : Tuple = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it save with the same files self.assertSequenceEqual(_lowerCamelCase , _lowerCamelCase ) # Checks everything loads correctly in the same way A_ : Any = tokenizer_r.from_pretrained(_lowerCamelCase ) A_ : Union[str, Any] = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) ) shutil.rmtree(_lowerCamelCase ) # Save tokenizer rust, legacy_format=False A_ : Tuple = tempfile.mkdtemp() A_ : Optional[Any] = tokenizer_r.save_pretrained(_lowerCamelCase , legacy_format=_lowerCamelCase ) A_ : Optional[int] = tokenizer_p.save_pretrained(_lowerCamelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way A_ : Optional[Any] = tokenizer_r.from_pretrained(_lowerCamelCase ) A_ : List[str] = tokenizer_p.from_pretrained(_lowerCamelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) ) shutil.rmtree(_lowerCamelCase ) @cached_property def _a ( self : Tuple ): """simple docstring""" return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def _a ( self : Tuple ): """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(_lowerCamelCase , f.name ) A_ : int = XLMRobertaTokenizer(f.name , keep_accents=_lowerCamelCase ) A_ : Tuple = pickle.dumps(_lowerCamelCase ) pickle.loads(_lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" if not self.test_rust_tokenizer: return A_ : Union[str, Any] = self.get_tokenizer() A_ : List[Any] = self.get_rust_tokenizer() A_ : Union[str, Any] = '''I was born in 92000, and this is falsé.''' A_ : Any = 
tokenizer.tokenize(_lowerCamelCase ) A_ : List[str] = rust_tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Any = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) A_ : Tuple = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = self.get_rust_tokenizer() A_ : Any = tokenizer.encode(_lowerCamelCase ) A_ : Tuple = rust_tokenizer.encode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) @slow def _a ( self : List[Any] ): """simple docstring""" A_ : Tuple = '''Hello World!''' A_ : Optional[Any] = [0, 35378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @slow def _a ( self : Tuple ): """simple docstring""" A_ : Dict = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) A_ : Dict = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(_lowerCamelCase , self.big_tokenizer.encode(_lowerCamelCase ) ) @slow def _a ( self : Optional[int] ): """simple docstring""" A_ : Optional[Any] = {'''input_ids''': [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
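For reference, a hedged round-trip with the real checkpoint exercised by the slow tests above (this downloads the xlm-roberta-base sentencepiece model on first use; the expected ids come from the test itself, the decoded text is an assumption):

from transformers import XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
ids = tokenizer.encode("Hello World!")
print(ids)  # the slow test above expects [0, 35378, 6661, 38, 2]
print(tokenizer.decode(ids, skip_special_tokens=True))  # should recover "Hello World!"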
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor

if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
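Outside the test harness, the consumer-side pattern these tests exercise looks like the sketch below: generation runs on a background thread while the main thread drains the streamer. The tiny test checkpoint is reused here only to keep the example cheap; the prompt text is an arbitrary assumption.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Once upon a time", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
thread.start()
for chunk in streamer:  # yields decoded text pieces as soon as they are ready
    print(chunk, end="", flush=True)
thread.join()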
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
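A short sketch of what this lazy structure buys downstream users (illustrative, not from the original file): importing the config is cheap, while torch-backed classes are only materialized when the corresponding attribute is first accessed.

from transformers import EfficientNetConfig  # resolved through _LazyModule; no torch-backed module loaded yet

config = EfficientNetConfig()  # default hyperparameters
# from transformers import EfficientNetModel  # this import would trigger loading modeling_efficientnet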
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation for minimum vertex cover: repeatedly pick the vertex of maximum degree.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():  # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
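The max-heap-by-negation trick used above, shown in isolation (a standalone sketch with made-up node names): heapq implements a min-heap, so pushing the negated degree makes the highest-degree vertex pop first.

import heapq

degrees = {"a": 3, "b": 1, "c": 2}
heap = [[-degree, node] for node, degree in degrees.items()]
heapq.heapify(heap)
print(heapq.heappop(heap))  # [-3, 'a']: the maximum-degree vertex comes out first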
import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
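The same checkpoint can be driven outside unittest; a minimal inference script mirroring the image-classification integration test above (downloads google/vit-base-patch16-224; the image path is the repository fixture the test uses):

import torch
from PIL import Image

from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # ImageNet label of the top class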
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', 
F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]: A_ : Tuple = state_dict.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def snake_case__ ( lowerCamelCase__ : Dict ) -> Any: A_ : int = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) A_ : List[str] = value else: A_ : Optional[int] = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]: A_ : Any = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : str = in_proj_weight[:2_5_6, :] A_ : Optional[Any] = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : Tuple = in_proj_bias[2_5_6:5_1_2] A_ : Tuple = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : List[str] = in_proj_weight[:2_5_6, :] A_ : int = in_proj_bias[:2_5_6] A_ : Any = in_proj_weight[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias[2_5_6:5_1_2] A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :] A_ : Optional[Any] = in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention A_ : Tuple = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to 
the state dict A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :] A_ : Tuple = in_proj_bias_cross_attn[:2_5_6] A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2] A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :] A_ : Any = in_proj_bias_cross_attn[-2_5_6:] def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict: A_ ,A_ : int = image.size A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 A_ : Union[str, Any] = target_max_size / current_max_size A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case__ ( lowerCamelCase__ : Tuple ) -> str: A_ : Any = F.to_tensor(lowerCamelCase__ ) A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str: logger.info('''Converting model...''' ) # load original state dict A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : str = rename_backbone_keys(lowerCamelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCamelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[Any] = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): A_ : List[Any] = state_dict.pop(lowerCamelCase__ ) A_ : str = val # create HuggingFace model and load state dict A_ : Union[str, Any] = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: A_ : Dict = 1_5 A_ : Dict = 2 A_ : int = {0: '''table''', 1: '''table rotated'''} A_ : List[str] = idalabel A_ : Optional[int] = {v: k for k, v in idalabel.items()} else: A_ : Union[str, Any] = 1_2_5 A_ : Optional[Any] = 6 A_ : Optional[Any] = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } A_ : int = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} A_ : Optional[Any] = DetrImageProcessor( format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 ) A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # verify our conversion A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ ) A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' ) A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 ) A_ : str = model(lowerCamelCase__ ) if "detection" in checkpoint_url: A_ : str = (1, 1_5, 3) A_ : int = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) A_ : Tuple = 
torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: A_ : Optional[int] = (1, 1_2_5, 7) A_ : Dict = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) A_ : List[Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(lowerCamelCase__ ) image_processor.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
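# Usage sketch (not part of the script above): converting the detection
# checkpoint from the command line. The URL is one of the two values the
# argument parser's `choices` allows; the output directory is arbitrary, and
# the module file name is an assumption.
#
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection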
'''simple docstring''' import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : str ) -> str: A_ : List[str] = RobertaPreLayerNormConfig.from_pretrained( lowerCamelCase__ , architectures=['''RobertaPreLayerNormForMaskedLM'''] ) # convert state_dict A_ : Union[str, Any] = torch.load(hf_hub_download(repo_id=lowerCamelCase__ , filename='''pytorch_model.bin''' ) ) A_ : Dict = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwriting 'roberta' if tensor_key.startswith('''roberta.''' ): A_ : Tuple = '''roberta_prelayernorm.''' + tensor_key[len('''roberta.''' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('''.self.LayerNorm.weight''' ) or tensor_key.endswith('''.self.LayerNorm.bias''' ): continue A_ : str = tensor_value A_ : str = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=lowerCamelCase__ , config=lowerCamelCase__ , state_dict=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) # convert tokenizer A_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase__ ) tokenizer.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint-repo""", default=None, type=str, required=True, help="""Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) snake_case__ = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
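# Usage sketch mirroring the argument parser above (the module file name is
# an assumption; note the first flag uses a hyphen, `--checkpoint-repo`):
#
#   python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted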
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case__ = logging.getLogger(__name__) @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : int = os.path.join( _lowerCamelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , ) A_ : Dict = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : List[str] = label_list[2], label_list[1] A_ : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
A_ : str = cached_features_file + '''.lock''' with FileLock(_lowerCamelCase ): if os.path.exists(_lowerCamelCase ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) A_ : List[str] = torch.load(_lowerCamelCase ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) A_ : Optional[int] = ( processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) ) logger.info('''Training examples: %s''' , len(_lowerCamelCase ) ) A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) logger.info('''Saving features into cached file %s''' , _lowerCamelCase ) torch.save(self.features , _lowerCamelCase ) def __len__( self : List[str] ): """simple docstring""" return len(self.features ) def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" return self.features[i] def _a ( self : str ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : Union[str, Any] = label_list[2], label_list[1] A_ : Tuple = label_list A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) A_ : List[Any] = tf.data.Dataset.from_generator( _lowerCamelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _a ( self : Any ): """simple docstring""" return self.dataset def __len__( self : Dict ): """simple docstring""" return len(self.features ) def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ): """simple docstring""" return self.features[i] def _a ( self : Tuple ): """simple docstring""" return self.label_list class UpperCamelCase_ (a__ ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def _a ( self : List[str] , _lowerCamelCase : Tuple ): """simple docstring""" return 
self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def _a ( self : Any ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ): """simple docstring""" A_ : Tuple = [] for i, line in enumerate(_lowerCamelCase ): if i == 0: continue A_ : str = '''%s-%s''' % (set_type, line[0]) A_ : Optional[Any] = line[5] A_ : Union[str, Any] = line[6] A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7] A_ : str = line[0] examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) ) return examples def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int: A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )} A_ : Optional[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ): if ex_index % 1_0_0_0_0 == 0: logger.info('''Writing example %d''' % (ex_index) ) A_ : Optional[int] = tokenizer( example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , ) A_ : List[str] = label_map[example.label] if example.label in label_map else 0 A_ : Tuple = int(example.pairID ) features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f'guid: {example}' ) logger.info(f'features: {features[i]}' ) return features snake_case__ = { """hans""": 3, } snake_case__ = { """hans""": HansProcessor, }
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller snake_case__ = 3 def snake_case__ ( lowerCamelCase__ : int ) -> int: print('''Generating primitive root of p''' ) while True: A_ : List[Any] = random.randrange(3 , lowerCamelCase__ ) if pow(lowerCamelCase__ , 2 , lowerCamelCase__ ) == 1: continue if pow(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) == 1: continue return g def snake_case__ ( lowerCamelCase__ : int ) -> tuple[tuple[int, int, int, int], tuple[int, int]]: print('''Generating prime p...''' ) A_ : List[str] = rabin_miller.generate_large_prime(lowerCamelCase__ ) # select a large prime number. A_ : str = primitive_root(lowerCamelCase__ ) # one primitive root modulo p. A_ : List[str] = random.randrange(3 , lowerCamelCase__ ) # private_key -> has to be greater than 2 for safety. A_ : Tuple = cryptomath.find_mod_inverse(pow(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , lowerCamelCase__ ) A_ : str = (key_size, e_a, e_a, p) A_ : Dict = (key_size, d) return public_key, private_key def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : int ) -> None: if os.path.exists(f'{name}_pubkey.txt' ) or os.path.exists(f'{name}_privkey.txt' ): print('''\nWARNING:''' ) print( f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n' '''Use a different name or delete these files and re-run this program.''' ) sys.exit() A_ ,A_ : Optional[int] = generate_key(lowerCamelCase__ ) print(f'\nWriting public key to file {name}_pubkey.txt...' ) with open(f'{name}_pubkey.txt' , '''w''' ) as fo: fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' ) print(f'Writing private key to file {name}_privkey.txt...' ) with open(f'{name}_privkey.txt' , '''w''' ) as fo: fo.write(f'{private_key[0]},{private_key[1]}' ) def snake_case__ ( ) -> None: print('''Making key files...''' ) make_key_files('''elgamal''' , 2_0_4_8 ) print('''Key files generation successful''' ) if __name__ == "__main__": main()
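# A toy end-to-end check (a sketch, not part of the module above) of the
# ElGamal relationship the generated keys rely on. The tiny prime p and the
# fixed generator stand in for rabin_miller.generate_large_prime and the
# random choices made inside generate_key; requires Python 3.8+ for
# pow(x, -1, p).
import random

p = 1019                        # small prime, illustration only
g = 2                           # a primitive root modulo 1019
d = random.randrange(3, p)      # private exponent
h = pow(g, d, p)                # public component g^d mod p
# (generate_key above stores the modular *inverse* of g^d as its public
# component instead, which is an equivalent formulation.)

m = 42                                    # message, 0 <= m < p
k = random.randrange(2, p - 1)            # ephemeral key
c1, c2 = pow(g, k, p), (m * pow(h, k, p)) % p
recovered = (c2 * pow(pow(c1, d, p), -1, p)) % p  # m = c2 * (c1^d)^(-1) mod p
assert recovered == m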
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = "utf-8" _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = True # deprecated _lowerCAmelCase = None # deprecated _lowerCAmelCase = 1_0 << 2_0 # 10MB _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = JsonConfig def _a ( self : int ): """simple docstring""" if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) A_ : List[Any] = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def _a ( self : Any , _lowerCamelCase : List[str] ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : List[str] = [files] A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : Tuple = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : int = [files] A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : int , _lowerCamelCase : pa.Table ): """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : int ): """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : int = json.load(_lowerCamelCase ) # We keep only the field we are interested in A_ : List[str] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_lowerCamelCase , 
(list, tuple) ): A_ : int = set().union(*[row.keys() for row in dataset] ) A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} else: A_ : Tuple = dataset A_ : Dict = pa.Table.from_pydict(_lowerCamelCase ) yield file_idx, self._cast_table(_lowerCamelCase ) # If the file has one json object per line else: with open(_lowerCamelCase , '''rb''' ) as f: A_ : int = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ : int = max(self.config.chunksize // 32 , 16 << 10 ) A_ : int = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A_ : Any = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_lowerCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' ) try: while True: try: A_ : List[Any] = paj.read_json( io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_lowerCamelCase , pa.ArrowInvalid ) and "straddling" not in str(_lowerCamelCase ) or block_size > len(_lowerCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : Optional[Any] = json.load(_lowerCamelCase ) except json.JSONDecodeError: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON try: A_ : Optional[int] = set().union(*[row.keys() for row in dataset] ) A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} A_ : int = pa.Table.from_pydict(_lowerCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(_lowerCamelCase ) break else: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError( f'Not able to read records in the JSON file at {file}. ' f'You should probably indicate the field of the JSON file containing your records. ' f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) batch_idx += 1
'''simple docstring''' import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple ) -> Tuple: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Tuple = np.full((len(lowerCamelCase__ ), sequence_length, 2) , lowerCamelCase__ ) else: A_ : Optional[Any] = np.full((len(lowerCamelCase__ ), sequence_length) , lowerCamelCase__ ) for i, tensor in enumerate(lowerCamelCase__ ): if padding_side == "right": if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : Union[str, Any] = tensor[:sequence_length] else: A_ : str = tensor[:sequence_length] else: if isinstance(lowerCamelCase__ , lowerCamelCase__ ): A_ : str = tensor[:sequence_length] else: A_ : Union[str, Any] = tensor[:sequence_length] return out_tensor.tolist() def snake_case__ ( lowerCamelCase__ : List[str] ) -> Any: A_ : int = ord(lowerCamelCase__ ) if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6): return True A_ : List[Any] = unicodedata.category(lowerCamelCase__ ) if cat.startswith('''P''' ): return True return False @dataclass class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = True _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = -1_0_0 _lowerCAmelCase = "pt" def _a ( self : Tuple , _lowerCamelCase : List[Any] ): """simple docstring""" import torch A_ : Any = '''label''' if '''label''' in features[0].keys() else '''labels''' A_ : int = [feature[label_name] for feature in features] if label_name in features[0].keys() else None A_ : List[str] = self.tokenizer.pad( _lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch A_ : Dict = torch.tensor(batch['''entity_ids'''] ).shape[1] A_ : List[str] = self.tokenizer.padding_side if padding_side == "right": A_ : Dict = [ list(_lowerCamelCase ) + [self.label_pad_token_id] * (sequence_length - len(_lowerCamelCase )) for label in labels ] else: A_ : str = [ [self.label_pad_token_id] * (sequence_length - len(_lowerCamelCase )) + list(_lowerCamelCase ) for label in labels ] A_ : List[Any] = [feature['''ner_tags'''] for feature in features] A_ : Dict = padding_tensor(_lowerCamelCase , -1 , _lowerCamelCase , _lowerCamelCase ) A_ : Any = [feature['''original_entity_spans'''] for feature in features] A_ : Dict = padding_tensor(_lowerCamelCase , (-1, -1) , _lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = {k: torch.tensor(_lowerCamelCase , dtype=torch.intaa ) for k, v in batch.items()} return batch
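# A small sketch of the padding helper, assuming the name `padding_tensor`
# that the collator body uses and the un-mangled slicing of the original
# LUKE example (variable-length rows padded to a fixed width):
#
#   padding_tensor([[1, 2], [3]], -100, "right", 4)
#   # -> [[1, 2, -100, -100], [3, -100, -100, -100]]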
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCamelCase_ (a__, a__ ): """simple docstring""" _lowerCAmelCase = 'swin' _lowerCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Optional[int] = image_size A_ : Optional[int] = patch_size A_ : Optional[int] = num_channels A_ : Any = embed_dim A_ : List[Any] = depths A_ : Any = len(_lowerCamelCase ) A_ : List[Any] = num_heads A_ : Tuple = window_size A_ : Tuple = mlp_ratio A_ : Dict = qkv_bias A_ : List[str] = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Any = drop_path_rate A_ : List[Any] = hidden_act A_ : Tuple = use_absolute_embeddings A_ : int = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) ) A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : str ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return 1E-4
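# Instantiation sketch, assuming the upstream class name SwinConfig for the
# configuration class above (it appears here under a mangled name). With the
# defaults (embed_dim=96, depths=[2, 2, 6, 2]) the derived hidden size is
# 96 * 2 ** (4 - 1) = 768.
config = SwinConfig()
assert config.hidden_size == 768
assert config.num_attention_heads == [3, 6, 12, 24]  # aliased to num_heads via attribute_map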
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : str ) -> str: return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' from __future__ import annotations def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> list[int]: A_ : int = 0 A_ : str = len(lowerCamelCase__ ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: A_ : Tuple = i + 1 else: A_ : List[str] = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F'{two_pointer([2, 7, 11, 15], 9) = }')
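# Quick sanity checks (a sketch, not part of the original file), using the
# name `two_pointer` from the file's own __main__ block. The scan assumes
# `nums` is sorted ascending; on unsorted input it can miss a valid pair.
assert two_pointer([2, 7, 11, 15], 9) == [0, 1]
assert two_pointer([1, 3, 5], 100) == []  # no pair reaches the target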
'''simple docstring''' import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = 1_0_0_0_0 _lowerCAmelCase = None _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = ParquetConfig def _a ( self : Any ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def _a ( self : str , _lowerCamelCase : Dict ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : List[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive A_ : Optional[int] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : str = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : Union[str, Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive A_ : Optional[int] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] # Infer features if they are stored in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_lowerCamelCase ): with open(_lowerCamelCase , '''rb''' ) as f: A_ : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_lowerCamelCase ) ) break splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : Tuple , _lowerCamelCase : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example A_ : Optional[Any] = table_cast(_lowerCamelCase , self.info.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : List[str] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): with open(_lowerCamelCase , '''rb''' ) as f: A_ : List[str] = pq.ParquetFile(_lowerCamelCase ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): A_ : Any = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'{file_idx}_{batch_idx}', self._cast_table(_lowerCamelCase ) except ValueError as e:
logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list[int] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool: # Base Case if index == len(lowerCamelCase__ ): return True # Recursive Step for i in range(lowerCamelCase__ ): if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ): # Color current vertex A_ : int = i # Validate coloring if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ): return True # Backtrack A_ : str = -1 return False def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[int]: A_ : List[str] = [-1] * len(lowerCamelCase__ ) if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ): return colored_vertices return []
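# Usage sketch, assuming the original (un-mangled) top-level name `color`
# implied by the helpers above: 3-color a triangle graph given as an
# adjacency matrix. An empty list would mean no valid coloring exists.
triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
print(color(triangle, 3))  # e.g. [0, 1, 2]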
'''simple docstring''' class UpperCamelCase_ : """simple docstring""" def __init__( self : Tuple , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : List[Any] = arr.split(''',''' ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = [int(self.array[0] )] * len(self.array ) A_ : Tuple = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): A_ : int = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) A_ : Union[str, Any] = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": snake_case__ = input("""please input some numbers:""") snake_case__ = SubArray(whole_array) snake_case__ = array.solve_sub_array() print(("""the result is:""", re))
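# Usage sketch with the names the file's own __main__ uses (SubArray /
# solve_sub_array); the constructor expects a comma-separated string.
sub_array = SubArray("1,-2,3,4,-1")
print(sub_array.solve_sub_array())  # 7, the best contiguous sum (3 + 4)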
'''simple docstring''' from __future__ import annotations from PIL import Image # Define glider example snake_case__ = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]: A_ : str = [] for i in range(len(lowerCamelCase__ ) ): A_ : Optional[Any] = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours A_ : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(lowerCamelCase__ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(lowerCamelCase__ ) - 1: neighbour_count += cells[i + 1][j] if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. A_ : List[str] = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(lowerCamelCase__ ) return next_generation def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]: A_ : List[Any] = [] for _ in range(lowerCamelCase__ ): # Create output image A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) ) A_ : int = img.load() # Save cells to image for x in range(len(lowerCamelCase__ ) ): for y in range(len(cells[0] ) ): A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5 A_ : str = (colour, colour, colour) # Save image images.append(lowerCamelCase__ ) A_ : Optional[int] = new_generation(lowerCamelCase__ ) return images if __name__ == "__main__": snake_case__ = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
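# One-step sketch, assuming the original function name `new_generation`
# used inside generate_images: a single generation flips the blinker from
# vertical to horizontal.
print(new_generation(BLINKER))  # [[0, 0, 0], [1, 1, 1], [0, 0, 0]]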
'''simple docstring''' import os snake_case__ = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 1_00, """D""": 5_00, """M""": 10_00} def snake_case__ ( lowerCamelCase__ : str ) -> int: A_ : Optional[Any] = 0 A_ : Any = 0 while index < len(lowerCamelCase__ ) - 1: A_ : List[str] = SYMBOLS[numerals[index]] A_ : Any = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def snake_case__ ( lowerCamelCase__ : int ) -> str: A_ : int = '''''' A_ : Optional[Any] = num // 1_0_0_0 numerals += m_count * "M" num %= 1_0_0_0 A_ : str = num // 1_0_0 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 1_0_0 A_ : Optional[Any] = num // 1_0 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 1_0 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def snake_case__ ( lowerCamelCase__ : str = "/p089_roman.txt" ) -> int: A_ : Optional[Any] = 0 with open(os.path.dirname(lowerCamelCase__ ) + roman_numerals_filename ) as filea: A_ : Any = filea.readlines() for line in lines: A_ : Any = line.strip() A_ : List[str] = parse_roman_numerals(lowerCamelCase__ ) A_ : List[str] = generate_roman_numerals(lowerCamelCase__ ) savings += len(lowerCamelCase__ ) - len(lowerCamelCase__ ) return savings if __name__ == "__main__": print(F'{solution() = }')
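# Round-trip sketch, assuming the original names that the body of solution()
# uses (parse_roman_numerals / generate_roman_numerals):
assert parse_roman_numerals("MCMXCIV") == 1994
assert generate_roman_numerals(1994) == "MCMXCIV"  # minimal subtractive form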
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Any = tempfile.mkdtemp() A_ : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) A_ : Tuple = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict , **_lowerCamelCase : Tuple ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ): """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ): """simple docstring""" return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self : int ): """simple docstring""" A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self : int ): """simple docstring""" A_ : Tuple = self.get_tokenizer() A_ : Tuple = self.get_rust_tokenizer() A_ : Dict = self.get_image_processor() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) A_ : List[str] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.get_image_processor() A_ : Any = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Any = self.prepare_image_inputs() A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' ) A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self : Dict ): """simple docstring""" A_ : str = self.get_image_processor() A_ : List[str] = self.get_tokenizer() A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : int = '''lower newer''' A_ : str = processor(text=_lowerCamelCase ) A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self : str ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = '''lower newer''' A_ : Optional[int] = self.prepare_image_inputs() A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = self.get_image_processor() A_ : Optional[int] = self.get_tokenizer() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : str = processor.batch_decode(_lowerCamelCase ) A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : str = self.get_image_processor() A_ : Tuple = self.get_tokenizer() A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = '''lower newer''' A_ : List[str] = self.prepare_image_inputs() A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined' _lowerCAmelCase = 'image_segmenter' _lowerCAmelCase = CLIPSegForImageSegmentation _lowerCAmelCase = ['image', 'text'] _lowerCAmelCase = ['image'] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' ) def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ): """simple docstring""" with torch.no_grad(): A_ : Optional[int] = self.model(**_lowerCamelCase ).logits return logits def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : int = outputs.cpu().detach().numpy() A_ : Tuple = 0 A_ : List[str] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
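# Usage sketch, assuming the upstream tool name ImageSegmentationTool for the
# class above and a hypothetical local image path; the tool returns the
# segmentation mask as a PIL image.
from PIL import Image

tool = ImageSegmentationTool()
mask = tool(image=Image.open("cat.png"), label="cat")  # "cat.png" is illustrative
mask.save("cat_mask.png")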
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = """▁""" snake_case__ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", } snake_case__ = { """vocab_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json""" ), }, """spm_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model""" ) }, } snake_case__ = { """facebook/s2t-small-librispeech-asr""": 10_24, } snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""] snake_case__ = {"""mustc""": MUSTC_LANGS} class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = MAX_MODEL_INPUT_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = [] def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) A_ : Optional[int] = do_upper_case A_ : Tuple = do_lower_case A_ : Tuple = load_json(_lowerCamelCase ) A_ : Tuple = {v: k for k, v in self.encoder.items()} A_ : List[Any] = spm_file A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs ) if lang_codes is not None: A_ : Any = lang_codes A_ : Optional[Any] = LANGUAGES[lang_codes] A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs] A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs} A_ : Optional[int] = self.lang_tokens A_ : int = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: A_ : Dict = {} @property def _a ( self : Tuple ): """simple docstring""" return len(self.encoder ) @property def _a ( self : int ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def _a ( self : List[str] , _lowerCamelCase : Any ): """simple docstring""" A_ : int = new_tgt_lang self.set_tgt_lang_special_tokens(_lowerCamelCase ) def _a ( self : Tuple , _lowerCamelCase : str ): """simple docstring""" A_ : List[str] = self.lang_code_to_id[tgt_lang] A_ : Optional[Any] = [lang_code_id] def _a ( self : Optional[Any] , _lowerCamelCase : str ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def _a ( self : List[Any] , _lowerCamelCase : int ): """simple docstring""" return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] ) def 
_a ( self : int , _lowerCamelCase : int ): """simple docstring""" return self.decoder.get(_lowerCamelCase , self.unk_token ) def _a ( self : int , _lowerCamelCase : List[str] ): """simple docstring""" A_ : List[Any] = [] A_ : Any = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " A_ : Optional[Any] = [] else: current_sub_tokens.append(_lowerCamelCase ) A_ : Tuple = self.sp_model.decode(_lowerCamelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) A_ : Tuple = [1] * len(self.prefix_tokens ) A_ : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def _a ( self : Dict ): """simple docstring""" A_ : Union[str, Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.__dict__.copy() A_ : List[Any] = None return state def __setstate__( self : List[str] , _lowerCamelCase : Dict ): """simple docstring""" A_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A_ : Optional[int] = {} A_ : int = load_spm(self.spm_file , self.sp_model_kwargs ) def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" A_ : Dict = Path(_lowerCamelCase ) assert save_dir.is_dir(), f'{save_directory} should be a directory' A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(_lowerCamelCase , '''wb''' ) as fi: A_ : List[str] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (str(_lowerCamelCase ), str(_lowerCamelCase )) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ ) spm.Load(str(lowerCamelCase__ ) ) return spm def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]: with open(lowerCamelCase__ , '''r''' ) as f: return json.load(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any , 
lowerCamelCase__ : str ) -> None: with open(lowerCamelCase__ , '''w''' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
'''simple docstring''' print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
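# How the one-liner above works: the lambda receives its own source template
# as a string; `quine % quine` substitutes the string into itself, where %r
# re-inserts it in repr form (quotes included) and %% escapes to a literal %,
# so the printed text reproduces the program's source exactly.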
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 snake_case__ = sys.version_info >= (3, 10) def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' _lowerCAmelCase = 4_2 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = BasicEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[1, 2, 3] ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field() _lowerCAmelCase = field() _lowerCAmelCase = field() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = field() _lowerCAmelCase = None _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A_ : Union[str, 
Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : int = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) A_ : Dict = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : Any = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', 
'''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = HfArgumentParser(_lowerCamelCase ) A_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A_ : int = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _a ( self : Optional[int] ): """simple docstring""" @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" A_ : List[str] = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : int = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) A_ : Tuple = [OptionalExample] if 
is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : int = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) ) A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Dict = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Union[str, Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0] A_ : str = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Any = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] A_ : Optional[Any] = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = 
os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] A_ : int = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
4
1
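The test row above exercises transformers' `HfArgumentParser`, which derives an argparse CLI from dataclass fields and parses arguments back into dataclass instances. As a minimal reference sketch of that pattern (the `RunArgs` dataclass and its field values are invented for illustration, not taken from the row):

```python
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class RunArgs:
    # Hypothetical fields, mirroring the default/metadata style used in the tests above.
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


parser = HfArgumentParser(RunArgs)
# parse_args_into_dataclasses returns a tuple, one instance per dataclass type.
(run_args,) = parser.parse_args_into_dataclasses(["--foo", "7"])
print(run_args.foo, run_args.baz)  # -> 7 toto
```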
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str=False ) -> List[str]: A_ : Any = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''deit.embeddings.cls_token'''), ('''dist_token''', '''deit.embeddings.distillation_token'''), ('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''deit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" A_ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ('''norm.weight''', '''deit.layernorm.weight'''), ('''norm.bias''', '''deit.layernorm.bias'''), ('''head.weight''', '''cls_classifier.weight'''), ('''head.bias''', '''cls_classifier.bias'''), ('''head_dist.weight''', '''distillation_classifier.weight'''), ('''head_dist.bias''', '''distillation_classifier.bias'''), ] ) return rename_keys def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple=False ) -> str: for i in range(config.num_hidden_layers ): if base_model: A_ : Optional[Any] = '''''' else: A_ : Union[str, Any] = '''deit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) A_ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A_ : int = in_proj_weight[ : config.hidden_size, : ] A_ : List[str] = in_proj_bias[: config.hidden_size] 
A_ : Optional[int] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] A_ : Dict = in_proj_bias[-config.hidden_size :] def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) -> Dict: A_ : Optional[Any] = dct.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def snake_case__ ( ) -> List[str]: A_ : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A_ : Any = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return im @torch.no_grad() def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Any ) -> Optional[Any]: A_ : Any = DeiTConfig() # all deit models have fine-tuned heads A_ : Optional[int] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size A_ : List[Any] = 1_0_0_0 A_ : Union[str, Any] = '''huggingface/label-files''' A_ : Tuple = '''imagenet-1k-id2label.json''' A_ : Dict = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type='''dataset''' ) , '''r''' ) ) A_ : Tuple = {int(lowerCamelCase__ ): v for k, v in idalabel.items()} A_ : Any = idalabel A_ : Union[str, Any] = {v: k for k, v in idalabel.items()} A_ : str = int(deit_name[-6:-4] ) A_ : Any = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith('''tiny''' ): A_ : Optional[int] = 1_9_2 A_ : Dict = 7_6_8 A_ : List[str] = 1_2 A_ : Optional[int] = 3 elif deit_name[9:].startswith('''small''' ): A_ : Tuple = 3_8_4 A_ : int = 1_5_3_6 A_ : Dict = 1_2 A_ : Any = 6 if deit_name[9:].startswith('''base''' ): pass elif deit_name[4:].startswith('''large''' ): A_ : Tuple = 1_0_2_4 A_ : List[Any] = 4_0_9_6 A_ : Any = 2_4 A_ : Union[str, Any] = 1_6 # load original model from timm A_ : List[str] = timm.create_model(lowerCamelCase__ , pretrained=lowerCamelCase__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys A_ : List[Any] = timm_model.state_dict() A_ : List[Any] = create_rename_keys(lowerCamelCase__ , lowerCamelCase__ ) for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) read_in_q_k_v(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # load HuggingFace model A_ : Optional[Any] = DeiTForImageClassificationWithTeacher(lowerCamelCase__ ).eval() model.load_state_dict(lowerCamelCase__ ) # Check outputs on an image, prepared by DeiTImageProcessor A_ : str = int( (2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 A_ : Union[str, Any] = DeiTImageProcessor(size=lowerCamelCase__ , crop_size=config.image_size ) A_ : str = image_processor(images=prepare_img() , return_tensors='''pt''' ) A_ : Tuple = encoding['''pixel_values'''] A_ : Dict = model(lowerCamelCase__ ) A_ : Tuple = timm_model(lowerCamelCase__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowerCamelCase__ , outputs.logits , atol=1e-3 ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--deit_name""", default="""vit_deit_base_distilled_patch16_224""", type=str, help="""Name of the DeiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) snake_case__ = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
4
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) 
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
1
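The conversion script in the row above splits timm's fused attention projection (a single `(3 * hidden_size, hidden_size)` matrix plus bias) into separate query, key, and value tensors by slicing along the first dimension. A self-contained sketch of that slicing, with an arbitrary `hidden_size` chosen for illustration:

```python
import torch

# Arbitrary illustrative size; real ViT/DeiT hidden sizes are much larger.
hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused q|k|v rows
in_proj_bias = torch.randn(3 * hidden_size)

# Query rows come first, key rows second, value rows last.
q_w, q_b = in_proj_weight[:hidden_size, :], in_proj_bias[:hidden_size]
k_w, k_b = (
    in_proj_weight[hidden_size : 2 * hidden_size, :],
    in_proj_bias[hidden_size : 2 * hidden_size],
)
v_w, v_b = in_proj_weight[-hidden_size:, :], in_proj_bias[-hidden_size:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)
```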
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: A_ : List[str] = TextStreamer(_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Dict = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : str = tokenizer.decode(greedy_ids[0] ) A_ : int = TextIteratorStreamer(_lowerCamelCase ) A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() A_ : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : List[str] = -1 A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : Tuple = greedy_ids[:, input_ids.shape[1] :] A_ : Tuple = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Any = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase ) A_ : List[Any] = -1 A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: A_ : 
List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token A_ : List[str] = cs.out[:-1] # Remove the final "\n" A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Union[str, Any] = -1 A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 ) A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCamelCase ): A_ : str = '''''' for new_text in streamer: streamer_text += new_text
4
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined' _lowerCAmelCase = 'image_segmenter' _lowerCAmelCase = CLIPSegForImageSegmentation _lowerCAmelCase = ['image', 'text'] _lowerCAmelCase = ['image'] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' ) def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ): """simple docstring""" with torch.no_grad(): A_ : Optional[int] = self.model(**_lowerCamelCase ).logits return logits def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : int = outputs.cpu().detach().numpy() A_ : Tuple = 0 A_ : List[str] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. ' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements that should be identified in the segmentation mask. The tool returns the mask.' ) _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined' _lowerCAmelCase = 'image_segmenter' _lowerCAmelCase = CLIPSegForImageSegmentation _lowerCAmelCase = ['image', 'text'] _lowerCAmelCase = ['image'] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' ) def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ): """simple docstring""" with torch.no_grad(): A_ : Optional[int] = self.model(**_lowerCamelCase ).logits return logits def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : int = outputs.cpu().detach().numpy() A_ : Tuple = 0 A_ : List[str] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
4
1
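The first cell in the row above tests `TextStreamer`/`TextIteratorStreamer`. A minimal sketch of the iterator pattern those tests rely on, assuming the same tiny test checkpoint they use: `generate()` blocks, so it runs in a worker thread while the main thread drains the stream.

```python
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)
generation_kwargs = dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer)

# Run generation in the background and consume decoded text chunks as they arrive.
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
text = "".join(chunk for chunk in streamer)
thread.join()
print(text)
```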
'''simple docstring''' import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py snake_case__ = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. snake_case__ = direct_transformers_import(PATH_TO_TRANSFORMERS) snake_case__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` snake_case__ = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") snake_case__ = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def snake_case__ ( lowerCamelCase__ : List[str] ) -> Optional[Any]: A_ : Union[str, Any] = None # source code of `config_class` A_ : Tuple = inspect.getsource(lowerCamelCase__ ) A_ : Tuple = _re_checkpoint.findall(lowerCamelCase__ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('''/''' ): A_ : Tuple = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link A_ : Union[str, Any] = f'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: A_ : List[str] = ckpt_name break return checkpoint def snake_case__ ( ) -> List[str]: A_ : List[str] = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue A_ : Optional[int] = get_checkpoint_from_config_class(lowerCamelCase__ ) A_ : Optional[Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: A_ : str = '''\n'''.join(sorted(lowerCamelCase__ ) ) raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
4
'''simple docstring''' from collections.abc import Sequence def snake_case__ ( lowerCamelCase__ : Sequence[float] , lowerCamelCase__ : bool = False ) -> float: if not arr: return 0 A_ : Union[str, Any] = 0 if allow_empty_subarrays else float('''-inf''' ) A_ : str = 0.0 for num in arr: A_ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num ) A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() snake_case__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'{max_subarray_sum(nums) = }')
4
1
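The `max_subarray_sum` cell in the row above is a Kadane-style linear scan with an `allow_empty_subarrays` switch. A readable restatement with conventional names (the function below is a reference sketch, not the dataset's code):

```python
def max_subarray_sum(arr, allow_empty=False):
    if not arr:
        return 0
    best = 0 if allow_empty else float("-inf")
    current = 0.0
    for num in arr:
        # Either extend the running subarray or restart it: at 0 when empty
        # subarrays are allowed, otherwise at the current element.
        current = max(0 if allow_empty else num, current + num)
        best = max(best, current)
    return best


assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
```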
'''simple docstring''' import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = DownBlockaD # noqa F405 _lowerCAmelCase = 'down' def _a ( self : Optional[Any] ): """simple docstring""" A_ : str = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ResnetDownsampleBlockaD # noqa F405 _lowerCAmelCase = 'down' def _a ( self : Any ): """simple docstring""" A_ : Any = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = AttnDownBlockaD # noqa F405 _lowerCAmelCase = 'down' def _a ( self : List[str] ): """simple docstring""" A_ : Dict = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = CrossAttnDownBlockaD # noqa F405 _lowerCAmelCase = 'down' def _a ( self : Dict ): """simple docstring""" A_ ,A_ : str = super().prepare_init_args_and_inputs_for_common() A_ : Tuple = 32 return init_dict, inputs_dict def _a ( self : str ): """simple docstring""" A_ : Optional[Any] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = SimpleCrossAttnDownBlockaD # noqa F405 _lowerCAmelCase = 'down' @property def _a ( self : Optional[int] ): """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=_lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ ,A_ : List[Any] = super().prepare_init_args_and_inputs_for_common() A_ : List[str] = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def _a ( self : Dict ): """simple docstring""" A_ : Tuple = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = SkipDownBlockaD # noqa F405 _lowerCAmelCase = 'down' @property def _a ( self : Dict ): """simple docstring""" return super().get_dummy_input(include_skip_sample=_lowerCamelCase ) def _a ( self : str ): """simple docstring""" A_ : Optional[Any] = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = AttnSkipDownBlockaD # noqa F405 _lowerCAmelCase = 'down' @property def _a ( self : int ): """simple docstring""" return super().get_dummy_input(include_skip_sample=_lowerCamelCase ) def _a ( self : Any ): """simple docstring""" A_ : Optional[Any] = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = DownEncoderBlockaD # noqa F405 _lowerCAmelCase = 'down' @property def _a ( self : Optional[int] ): """simple 
docstring""" return super().get_dummy_input(include_temb=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = { '''in_channels''': 32, '''out_channels''': 32, } A_ : int = self.dummy_input return init_dict, inputs_dict def _a ( self : Optional[int] ): """simple docstring""" A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = AttnDownEncoderBlockaD # noqa F405 _lowerCAmelCase = 'down' @property def _a ( self : List[str] ): """simple docstring""" return super().get_dummy_input(include_temb=_lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = { '''in_channels''': 32, '''out_channels''': 32, } A_ : Optional[int] = self.dummy_input return init_dict, inputs_dict def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = UNetMidBlockaD # noqa F405 _lowerCAmelCase = 'mid' def _a ( self : Any ): """simple docstring""" A_ : List[str] = { '''in_channels''': 32, '''temb_channels''': 128, } A_ : Tuple = self.dummy_input return init_dict, inputs_dict def _a ( self : List[str] ): """simple docstring""" A_ : Tuple = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = UNetMidBlockaDCrossAttn # noqa F405 _lowerCAmelCase = 'mid' def _a ( self : Optional[int] ): """simple docstring""" A_ ,A_ : List[Any] = super().prepare_init_args_and_inputs_for_common() A_ : int = 32 return init_dict, inputs_dict def _a ( self : List[str] ): """simple docstring""" A_ : Tuple = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = UNetMidBlockaDSimpleCrossAttn # noqa F405 _lowerCAmelCase = 'mid' @property def _a ( self : Any ): """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=_lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : Any = super().prepare_init_args_and_inputs_for_common() A_ : Optional[Any] = 32 return init_dict, inputs_dict def _a ( self : int ): """simple docstring""" A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = UpBlockaD # noqa F405 _lowerCAmelCase = 'up' @property def _a ( self : str ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : Tuple = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = ResnetUpsampleBlockaD # noqa F405 _lowerCAmelCase = 'up' @property def _a ( self : Optional[Any] ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _a ( self : 
Optional[Any] ): """simple docstring""" A_ : Any = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = CrossAttnUpBlockaD # noqa F405 _lowerCAmelCase = 'up' @property def _a ( self : Dict ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : Union[str, Any] = super().prepare_init_args_and_inputs_for_common() A_ : Tuple = 32 return init_dict, inputs_dict def _a ( self : int ): """simple docstring""" A_ : Optional[Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = SimpleCrossAttnUpBlockaD # noqa F405 _lowerCAmelCase = 'up' @property def _a ( self : List[str] ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase , include_encoder_hidden_states=_lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ ,A_ : Optional[Any] = super().prepare_init_args_and_inputs_for_common() A_ : Optional[Any] = 32 return init_dict, inputs_dict def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = AttnUpBlockaD # noqa F405 _lowerCAmelCase = 'up' @property def _a ( self : List[Any] ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def _a ( self : int ): """simple docstring""" A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = SkipUpBlockaD # noqa F405 _lowerCAmelCase = 'up' @property def _a ( self : Any ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : List[str] = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = AttnSkipUpBlockaD # noqa F405 _lowerCAmelCase = 'up' @property def _a ( self : Tuple ): """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=_lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = UpDecoderBlockaD # noqa F405 _lowerCAmelCase = 'up' @property def _a ( self : str ): """simple docstring""" return super().get_dummy_input(include_temb=_lowerCamelCase ) def _a ( self : Any ): """simple docstring""" A_ : Optional[Any] = {'''in_channels''': 32, '''out_channels''': 32} A_ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = [0.44_04, 
0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37] super().test_output(_lowerCamelCase ) class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = AttnUpDecoderBlockaD # noqa F405 _lowerCAmelCase = 'up' @property def _a ( self : Any ): """simple docstring""" return super().get_dummy_input(include_temb=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Any = {'''in_channels''': 32, '''out_channels''': 32} A_ : str = self.dummy_input return init_dict, inputs_dict def _a ( self : List[str] ): """simple docstring""" A_ : Optional[int] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68] super().test_output(_lowerCamelCase )
4
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
4
1
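The `Speech2Text2Config` cell above declares an `attribute_map` so that generic names alias the model-specific ones. A short sketch of the behavior this enables, assuming the standard `PretrainedConfig` attribute aliasing in transformers:

```python
from transformers import Speech2Text2Config

config = Speech2Text2Config(d_model=256, decoder_attention_heads=4)

# attribute_map = {"num_attention_heads": "decoder_attention_heads",
#                  "hidden_size": "d_model"} makes the generic names resolve
# to the model-specific attributes.
assert config.hidden_size == config.d_model == 256
assert config.num_attention_heads == config.decoder_attention_heads == 4
```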
'''simple docstring''' from __future__ import annotations from PIL import Image # Define glider example snake_case__ = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def snake_case__ ( lowerCamelCase__ : list[list[int]] ) -> list[list[int]]: A_ : str = [] for i in range(len(lowerCamelCase__ ) ): A_ : Optional[Any] = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours A_ : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(lowerCamelCase__ ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(lowerCamelCase__ ) - 1: neighbour_count += cells[i + 1][j] if i < len(lowerCamelCase__ ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. A_ : List[str] = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(lowerCamelCase__ ) return next_generation def snake_case__ ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int ) -> list[Image.Image]: A_ : List[Any] = [] for _ in range(lowerCamelCase__ ): # Create output image A_ : Optional[int] = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase__ )) ) A_ : int = img.load() # Save cells to image for x in range(len(lowerCamelCase__ ) ): for y in range(len(cells[0] ) ): A_ : Optional[Any] = 2_5_5 - cells[y][x] * 2_5_5 A_ : str = (colour, colour, colour) # Save image images.append(lowerCamelCase__ ) A_ : Optional[int] = new_generation(lowerCamelCase__ ) return images if __name__ == "__main__": snake_case__ = generate_images(GLIDER, 16) images[0].save("""out.gif""", save_all=True, append_images=images[1:])
4
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'table-transformer' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : str = backbone_config.get('''model_type''' ) A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] A_ : List[str] = config_class.from_dict(_lowerCamelCase ) # set timm attributes to None A_ ,A_ ,A_ : Union[str, Any] = None, None, None A_ : Optional[Any] = use_timm_backbone A_ : Optional[int] = backbone_config A_ : Optional[Any] = num_channels A_ : Dict = num_queries A_ : str = d_model A_ : List[str] = encoder_ffn_dim A_ : int = encoder_layers A_ : Optional[Any] = encoder_attention_heads A_ : List[str] = decoder_ffn_dim A_ : Any = decoder_layers A_ : List[str] = decoder_attention_heads A_ : Tuple = dropout A_ : Optional[Any] = attention_dropout A_ : Any = activation_dropout A_ : List[Any] = activation_function A_ : Dict = init_std A_ : Any = init_xavier_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : Any = encoder_layers A_ : List[str] = auxiliary_loss A_ : List[Any] = position_embedding_type A_ : Optional[Any] = backbone A_ : Tuple = use_pretrained_backbone A_ : List[Any] = dilation # Hungarian matcher A_ : List[str] = class_cost A_ : str = bbox_cost A_ : Union[str, Any] = giou_cost # Loss coefficients A_ : Any = mask_loss_coefficient A_ : Optional[int] = dice_loss_coefficient A_ : Dict = bbox_loss_coefficient A_ : int = giou_loss_coefficient A_ : int = eos_coefficient super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def _a ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def _a ( self : Any ): """simple docstring""" return self.d_model class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Tuple ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-5 @property def _a ( self : str ): """simple docstring""" return 12
4
1
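The Game of Life cell in the row above encodes the survival/birth rules inside a long conditional. Per cell, the rule reduces to a two-line predicate; a sketch for reference (the `next_state` helper is invented for illustration, it is not in the snippet):

```python
def next_state(alive: bool, live_neighbours: int) -> bool:
    # A cell is alive in the next generation iff it has exactly 3 live
    # neighbours (birth or survival), or it is alive now with exactly 2.
    return live_neighbours == 3 or (alive and live_neighbours == 2)


assert next_state(True, 2) and next_state(False, 3)
assert not next_state(True, 4) and not next_state(False, 2)
```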
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
4
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple 
docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : 
Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase 
) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
4
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = KandinskyImgaImgPipeline _lowerCAmelCase = ['prompt', 'image_embeds', 'negative_image_embeds', 'image'] _lowerCAmelCase = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', ] _lowerCAmelCase = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _lowerCAmelCase = False @property def _a ( self : List[str] ): """simple docstring""" return 32 @property def _a ( self : List[str] ): """simple docstring""" return 32 @property def _a ( self : List[Any] ): """simple docstring""" return self.time_input_dim @property def _a ( self : Tuple ): """simple docstring""" return self.time_input_dim * 4 @property def _a ( self : Optional[Any] ): """simple docstring""" return 100 @property def _a ( self : int ): """simple docstring""" A_ : Dict = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def _a ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) A_ : Tuple = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) A_ : int = MultilingualCLIP(_lowerCamelCase ) A_ : Dict = text_encoder.eval() return text_encoder @property def _a ( self : Any ): """simple docstring""" torch.manual_seed(0 ) A_ : Optional[Any] = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } A_ : List[Any] = UNetaDConditionModel(**_lowerCamelCase ) return model @property def _a ( self : List[str] ): """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } 
@property def _a ( self : Optional[Any] ): """simple docstring""" torch.manual_seed(0 ) A_ : Optional[Any] = VQModel(**self.dummy_movq_kwargs ) return model def _a ( self : str ): """simple docstring""" A_ : Tuple = self.dummy_text_encoder A_ : Optional[int] = self.dummy_tokenizer A_ : Dict = self.dummy_unet A_ : Dict = self.dummy_movq A_ : Any = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_00_85, '''beta_end''': 0.0_12, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } A_ : int = DDIMScheduler(**_lowerCamelCase ) A_ : str = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def _a ( self : Optional[int] , _lowerCamelCase : Dict , _lowerCamelCase : List[str]=0 ): """simple docstring""" A_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) A_ : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowerCamelCase ) # create init_image A_ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase ) A_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : Optional[Any] = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) ) if str(_lowerCamelCase ).startswith('''mps''' ): A_ : Optional[int] = torch.manual_seed(_lowerCamelCase ) else: A_ : Tuple = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) A_ : Optional[Any] = { '''prompt''': '''horse''', '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def _a ( self : Any ): """simple docstring""" A_ : Any = '''cpu''' A_ : Optional[Any] = self.get_dummy_components() A_ : Optional[int] = self.pipeline_class(**_lowerCamelCase ) A_ : Dict = pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) A_ : Optional[int] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) ) A_ : Optional[Any] = output.images A_ : Dict = pipe( **self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0] A_ : List[str] = image[0, -3:, -3:, -1] A_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : Any = np.array( [0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : str ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_img2img_frog.npy''' ) A_ : str = load_image( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) A_ : Union[str, Any] = '''A red cartoon frog, 4k''' A_ : List[str] = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCamelCase ) A_ : Tuple = KandinskyImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa ) A_ : Any = pipeline.to(_lowerCamelCase ) pipeline.set_progress_bar_config(disable=_lowerCamelCase ) A_ : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) A_ ,A_ : Optional[int] = pipe_prior( _lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() A_ : Optional[Any] = pipeline( _lowerCamelCase , image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) A_ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
4
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
4
1
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = inspect.getfile(accelerate.test_utils ) A_ : List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] ) A_ : Dict = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] ) A_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] ) @require_multi_gpu def _a ( self : Union[str, Any] ): """simple docstring""" print(f'Found {torch.cuda.device_count()} devices.' ) A_ : List[str] = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() ) @require_multi_gpu def _a ( self : Optional[Any] ): """simple docstring""" print(f'Found {torch.cuda.device_count()} devices.' ) A_ : Optional[Any] = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path] print(f'Command: {cmd}' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() ) @require_multi_gpu def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() ) @require_multi_gpu def _a ( self : str ): """simple docstring""" print(f'Found {torch.cuda.device_count()} devices, using 2 devices only' ) A_ : List[Any] = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ): execute_subprocess_async(_lowerCamelCase , env=os.environ.copy() ) if __name__ == "__main__": snake_case__ = Accelerator() snake_case__ = (accelerator.state.process_index + 2, 10) snake_case__ = torch.randint(0, 10, shape).to(accelerator.device) snake_case__ = """""" snake_case__ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." snake_case__ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." snake_case__ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
4
from __future__ import annotations


class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
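A quick usage sketch for the filter class above (names as restored): configuring a first-order recursive section and feeding it an impulse, so the printed values trace the impulse response.

filt = IIRFilter(1)
# y[n] = 0.1*x[n] + 0.1*x[n-1] + 0.8*y[n-1], i.e. a_coeffs = [1.0, -0.8], b_coeffs = [0.1, 0.1]
filt.set_coefficients([1.0, -0.8], [0.1, 0.1])
impulse = [1.0, 0.0, 0.0, 0.0]
print([round(filt.process(x), 4) for x in impulse])  # [0.1, 0.18, 0.144, 0.1152]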
4
1
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
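A short usage sketch for the polynomial class above (coefficients are stored lowest degree first):

p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1
q = Polynomial(1, [0, 1])     # x
print(p + q)           # 3x^2 + 3x + 1
print(p * q)           # 3x^3 + 2x^2 + 1x
print(p.evaluate(2))   # 3*4 + 2*2 + 1 = 17
print(p.derivative())  # 6x + 2
print(p.derivative().integral(1))  # 3.0x^2 + 2.0x + 1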
4
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
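One behavioral note worth flagging: Node.insert treats an equal value as an update (the final else branch), so duplicates collapse and the sort is lossy on repeated elements:

print(tree_sort([3, 1, 3]))                  # [1, 3] -- the second 3 is silently dropped
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))   # [1, 2, 3, 9, 10, 13, 14]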
4
1
from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
4
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
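A sanity check on the generator above: Heap's algorithm visits every permutation exactly once, so the result has length n! and matches itertools.permutations up to ordering.

from itertools import permutations

perms = heaps([1, 2, 3])
print(len(perms))  # 6 == 3!
print(sorted(perms) == sorted(permutations([1, 2, 3])))  # True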
4
1
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
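For reference, move_tower performs exactly 2**height - 1 moves. A small companion sketch that counts the moves with the same recursion instead of printing them:

def count_moves(height: int) -> int:
    # Same recursion as move_tower, returning the number of move_disk calls.
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1

assert count_moves(3) == 7  # == 2**3 - 1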
4
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: A_ : List[str] = TextStreamer(_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Dict = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : str = tokenizer.decode(greedy_ids[0] ) A_ : int = TextIteratorStreamer(_lowerCamelCase ) A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() A_ : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : List[str] = -1 A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : Tuple = greedy_ids[:, input_ids.shape[1] :] A_ : Tuple = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Any = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase ) A_ : List[Any] = -1 A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: A_ : 
List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token A_ : List[str] = cs.out[:-1] # Remove the final "\n" A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Union[str, Any] = -1 A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 ) A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCamelCase ): A_ : str = '''''' for new_text in streamer: streamer_text += new_text
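The tests above exercise transformers' text streaming API. Outside a test harness, a minimal sketch of the same pattern with the tiny checkpoint used above looks roughly like this:

from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok(["A sequence:"], return_tensors="pt")
streamer = TextStreamer(tok)
# Decoded tokens are printed to stdout as they are generated.
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=10)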
4
1
from collections.abc import Sequence


def max_subarray_sum(
    arr: Sequence[float], allow_empty_subarrays: bool = False
) -> float:
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
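Worked example for the Kadane scan above: on the list used in __main__ the best subarray is [4, -1, 2, 1] with sum 6, and the allow_empty_subarrays flag only matters when every element is negative.

print(max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))           # 6
print(max_subarray_sum([-3, -1, -2]))                               # -1 (best single element)
print(max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True))   # 0 (empty subarray wins)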
4
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
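For the sample graph at the bottom, tracing the greedy heuristic (highest remaining degree first, ties broken by the smaller key) gives the cover {0, 1, 2, 4}. Note that the result is a heuristic cover, not necessarily minimum: {0, 2, 3} also touches every edge with one fewer vertex.

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(greedy_min_vertex_cover(graph))  # {0, 1, 2, 4}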
4
1
'''simple docstring''' from __future__ import annotations from typing import Any class UpperCamelCase_ : """simple docstring""" def __init__( self : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0 ): """simple docstring""" A_ ,A_ : Tuple = row, column A_ : Union[str, Any] = [[default_value for c in range(_lowerCamelCase )] for r in range(_lowerCamelCase )] def __str__( self : List[Any] ): """simple docstring""" A_ : List[str] = f'Matrix consist of {self.row} rows and {self.column} columns\n' # Make string identifier A_ : Any = 0 for row_vector in self.array: for obj in row_vector: A_ : int = max(_lowerCamelCase , len(str(_lowerCamelCase ) ) ) A_ : str = f'%{max_element_length}s' # Make string and return def single_line(_lowerCamelCase : list[float] ) -> str: nonlocal string_format_identifier A_ : Optional[int] = '''[''' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(_lowerCamelCase ) for row_vector in self.array ) return s def __repr__( self : List[Any] ): """simple docstring""" return str(self ) def _a ( self : Optional[Any] , _lowerCamelCase : tuple[int, int] ): """simple docstring""" if not (isinstance(_lowerCamelCase , (list, tuple) ) and len(_lowerCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : Optional[Any] , _lowerCamelCase : tuple[int, int] ): """simple docstring""" assert self.validate_indicies(_lowerCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self : Dict , _lowerCamelCase : tuple[int, int] , _lowerCamelCase : float ): """simple docstring""" assert self.validate_indicies(_lowerCamelCase ) A_ : List[str] = value def __add__( self : Tuple , _lowerCamelCase : Matrix ): """simple docstring""" assert isinstance(_lowerCamelCase , _lowerCamelCase ) assert self.row == another.row and self.column == another.column # Add A_ : int = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A_ : Dict = self[r, c] + another[r, c] return result def __neg__( self : List[str] ): """simple docstring""" A_ : List[str] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A_ : str = -self[r, c] return result def __sub__( self : Union[str, Any] , _lowerCamelCase : Matrix ): """simple docstring""" return self + (-another) def __mul__( self : str , _lowerCamelCase : int | float | Matrix ): """simple docstring""" if isinstance(_lowerCamelCase , (int, float) ): # Scalar multiplication A_ : int = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): A_ : Union[str, Any] = self[r, c] * another return result elif isinstance(_lowerCamelCase , _lowerCamelCase ): # Matrix multiplication assert self.column == another.row A_ : int = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: A_ : List[Any] = f'Unsupported type given for another ({type(_lowerCamelCase )})' raise TypeError(_lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): A_ : Union[str, Any] = self[r, c] return result def _a ( self : str , _lowerCamelCase : Matrix , _lowerCamelCase : Matrix ): """simple docstring""" assert isinstance(_lowerCamelCase , 
_lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate A_ : Tuple = v.transpose() A_ : Dict = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def snake_case__ ( ) -> None: # a^(-1) A_ : Union[str, Any] = Matrix(3 , 3 , 0 ) for i in range(3 ): A_ : Optional[Any] = 1 print(f'a^(-1) is {ainv}' ) # u, v A_ : str = Matrix(3 , 1 , 0 ) A_ ,A_ ,A_ : int = 1, 2, -3 A_ : str = Matrix(3 , 1 , 0 ) A_ ,A_ ,A_ : Optional[Any] = 4, -2, 5 print(f'u is {u}' ) print(f'v is {v}' ) print(f'uv^T is {u * v.transpose()}' ) # Sherman Morrison print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCamelCase__ , lowerCamelCase__ )}' ) def snake_case__ ( ) -> None: import doctest doctest.testmod() testa()
4
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', 
F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]: A_ : Tuple = state_dict.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def snake_case__ ( lowerCamelCase__ : Dict ) -> Any: A_ : int = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) A_ : List[str] = value else: A_ : Optional[int] = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]: A_ : Any = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : str = in_proj_weight[:2_5_6, :] A_ : Optional[Any] = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : Tuple = in_proj_bias[2_5_6:5_1_2] A_ : Tuple = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : List[str] = in_proj_weight[:2_5_6, :] A_ : int = in_proj_bias[:2_5_6] A_ : Any = in_proj_weight[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias[2_5_6:5_1_2] A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :] A_ : Optional[Any] = in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention A_ : Tuple = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to 
the state dict A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :] A_ : Tuple = in_proj_bias_cross_attn[:2_5_6] A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2] A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :] A_ : Any = in_proj_bias_cross_attn[-2_5_6:] def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict: A_ ,A_ : int = image.size A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 A_ : Union[str, Any] = target_max_size / current_max_size A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case__ ( lowerCamelCase__ : Tuple ) -> str: A_ : Any = F.to_tensor(lowerCamelCase__ ) A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str: logger.info('''Converting model...''' ) # load original state dict A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : str = rename_backbone_keys(lowerCamelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCamelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[Any] = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): A_ : List[Any] = state_dict.pop(lowerCamelCase__ ) A_ : str = val # create HuggingFace model and load state dict A_ : Union[str, Any] = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: A_ : Dict = 1_5 A_ : Dict = 2 A_ : int = {0: '''table''', 1: '''table rotated'''} A_ : List[str] = idalabel A_ : Optional[int] = {v: k for k, v in idalabel.items()} else: A_ : Union[str, Any] = 1_2_5 A_ : Optional[Any] = 6 A_ : Optional[Any] = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } A_ : int = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} A_ : Optional[Any] = DetrImageProcessor( format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 ) A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # verify our conversion A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ ) A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' ) A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 ) A_ : str = model(lowerCamelCase__ ) if "detection" in checkpoint_url: A_ : str = (1, 1_5, 3) A_ : int = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) A_ : Tuple = 
torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: A_ : Optional[int] = (1, 1_2_5, 7) A_ : Dict = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) A_ : List[Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(lowerCamelCase__ ) image_processor.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
1
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
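The scan above is the brute-force baseline: it checks every alignment of the pattern, so the worst case costs O(len(s) * len(pattern)) character comparisons, and overlapping matches are all reported.

print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))  # [4, 10, 18]
print(naive_pattern_search("AAAA", "AA"))                    # [0, 1, 2]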
4
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case__ = logging.getLogger(__name__) @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : int = os.path.join( _lowerCamelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , ) A_ : Dict = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : List[str] = label_list[2], label_list[1] A_ : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
A_ : str = cached_features_file + '''.lock''' with FileLock(_lowerCamelCase ): if os.path.exists(_lowerCamelCase ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) A_ : List[str] = torch.load(_lowerCamelCase ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) A_ : Optional[int] = ( processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) ) logger.info('''Training examples: %s''' , len(_lowerCamelCase ) ) A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) logger.info('''Saving features into cached file %s''' , _lowerCamelCase ) torch.save(self.features , _lowerCamelCase ) def __len__( self : List[str] ): """simple docstring""" return len(self.features ) def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" return self.features[i] def _a ( self : str ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : Union[str, Any] = label_list[2], label_list[1] A_ : Tuple = label_list A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) A_ : List[Any] = tf.data.Dataset.from_generator( _lowerCamelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _a ( self : Any ): """simple docstring""" return self.dataset def __len__( self : Dict ): """simple docstring""" return len(self.features ) def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ): """simple docstring""" return self.features[i] def _a ( self : Tuple ): """simple docstring""" return self.label_list class UpperCamelCase_ (a__ ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def _a ( self : List[str] , _lowerCamelCase : Tuple ): """simple docstring""" return 
self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def _a ( self : Any ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ): """simple docstring""" A_ : Tuple = [] for i, line in enumerate(_lowerCamelCase ): if i == 0: continue A_ : str = '''%s-%s''' % (set_type, line[0]) A_ : Optional[Any] = line[5] A_ : Union[str, Any] = line[6] A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7] A_ : str = line[0] examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) ) return examples def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int: A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )} A_ : Optional[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ): if ex_index % 1_0_0_0_0 == 0: logger.info('''Writing example %d''' % (ex_index) ) A_ : Optional[int] = tokenizer( example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , ) A_ : List[str] = label_map[example.label] if example.label in label_map else 0 A_ : Tuple = int(example.pairID ) features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f'guid: {example}' ) logger.info(f'features: {features[i]}' ) return features snake_case__ = { """hans""": 3, } snake_case__ = { """hans""": HansProcessor, }
4
1
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = (PNDMScheduler,) _lowerCAmelCase = (('num_inference_steps', 5_0),) def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ): """simple docstring""" A_ : Optional[int] = { '''num_train_timesteps''': 1000, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**_lowerCamelCase ) return config def _a ( self : Dict , _lowerCamelCase : Optional[int]=0 , **_lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : int = dict(self.forward_default_kwargs ) A_ : Any = kwargs.pop('''num_inference_steps''' , _lowerCamelCase ) A_ : List[str] = self.dummy_sample A_ : List[Any] = 0.1 * sample A_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A_ : List[str] = self.get_scheduler_config(**_lowerCamelCase ) A_ : Any = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residuals A_ : List[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCamelCase ) A_ : int = scheduler_class.from_pretrained(_lowerCamelCase ) new_scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residuals A_ : Optional[int] = dummy_past_residuals[:] A_ : List[str] = scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : List[str] = new_scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" A_ : List[str] = scheduler.step_plms(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : List[str] = new_scheduler.step_plms(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _a ( self : int ): """simple docstring""" pass def _a ( self : Union[str, Any] , _lowerCamelCase : Any=0 , **_lowerCamelCase : List[Any] ): """simple docstring""" A_ : Any = dict(self.forward_default_kwargs ) A_ : List[str] = kwargs.pop('''num_inference_steps''' , _lowerCamelCase ) A_ : Dict = self.dummy_sample A_ : List[Any] = 0.1 * sample A_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: A_ : Union[str, Any] = self.get_scheduler_config() A_ : int = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residuals (must be after setting timesteps) A_ : Optional[int] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_lowerCamelCase ) A_ : List[str] = scheduler_class.from_pretrained(_lowerCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_lowerCamelCase ) # copy over dummy past residual (must be after setting timesteps) A_ : List[Any] = dummy_past_residuals[:] A_ : Optional[Any] = scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : Tuple = new_scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, 
"Scheduler outputs are not identical" A_ : List[str] = scheduler.step_plms(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : List[Any] = new_scheduler.step_plms(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _a ( self : str , **_lowerCamelCase : List[Any] ): """simple docstring""" A_ : List[Any] = self.scheduler_classes[0] A_ : Any = self.get_scheduler_config(**_lowerCamelCase ) A_ : Union[str, Any] = scheduler_class(**_lowerCamelCase ) A_ : int = 10 A_ : List[Any] = self.dummy_model() A_ : str = self.dummy_sample_deter scheduler.set_timesteps(_lowerCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): A_ : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase ) A_ : int = scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): A_ : str = model(_lowerCamelCase , _lowerCamelCase ) A_ : Dict = scheduler.step_plms(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample return sample def _a ( self : str ): """simple docstring""" A_ : str = dict(self.forward_default_kwargs ) A_ : Union[str, Any] = kwargs.pop('''num_inference_steps''' , _lowerCamelCase ) for scheduler_class in self.scheduler_classes: A_ : Optional[int] = self.get_scheduler_config() A_ : Any = scheduler_class(**_lowerCamelCase ) A_ : Union[str, Any] = self.dummy_sample A_ : Dict = 0.1 * sample if num_inference_steps is not None and hasattr(_lowerCamelCase , '''set_timesteps''' ): scheduler.set_timesteps(_lowerCamelCase ) elif num_inference_steps is not None and not hasattr(_lowerCamelCase , '''set_timesteps''' ): A_ : Union[str, Any] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A_ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] A_ : Optional[Any] = dummy_past_residuals[:] A_ : int = scheduler.step_prk(_lowerCamelCase , 0 , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : List[str] = scheduler.step_prk(_lowerCamelCase , 1 , _lowerCamelCase , **_lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) A_ : Dict = scheduler.step_plms(_lowerCamelCase , 0 , _lowerCamelCase , **_lowerCamelCase ).prev_sample A_ : List[Any] = scheduler.step_plms(_lowerCamelCase , 1 , _lowerCamelCase , **_lowerCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _a ( self : Optional[Any] ): """simple docstring""" for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_lowerCamelCase ) A_ : Union[str, Any] = self.scheduler_classes[0] A_ : Dict = self.get_scheduler_config(steps_offset=1 ) A_ : Optional[Any] = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def _a ( self : Optional[int] ): """simple docstring""" for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ): self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" for schedule in ["linear", 
"squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=_lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=_lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = 27 for scheduler_class in self.scheduler_classes: A_ : Optional[Any] = self.dummy_sample A_ : List[Any] = 0.1 * sample A_ : Tuple = self.get_scheduler_config() A_ : str = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(_lowerCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): A_ : Dict = scheduler.step_prk(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample def _a ( self : str ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): A_ : Optional[Any] = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config() A_ : Union[str, Any] = scheduler_class(**_lowerCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def _a ( self : List[str] ): """simple docstring""" A_ : str = self.full_loop() A_ : Tuple = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : Dict = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2 assert abs(result_mean.item() - 0.25_80 ) < 1E-3 def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = self.full_loop(prediction_type='''v_prediction''' ) A_ : Optional[int] = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : str = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 67.39_86 ) < 1E-2 assert abs(result_mean.item() - 0.08_78 ) < 1E-3 def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Optional[int] = self.full_loop(set_alpha_to_one=_lowerCamelCase , beta_start=0.01 ) A_ : List[str] = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : List[str] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2 assert abs(result_mean.item() - 0.29_95 ) < 1E-3 def _a ( self : str ): """simple docstring""" A_ : int = self.full_loop(set_alpha_to_one=_lowerCamelCase , beta_start=0.01 ) A_ : Any = torch.sum(torch.abs(_lowerCamelCase ) ) A_ : str = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2 assert abs(result_mean.item() - 0.24_34 ) < 1E-3
4
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = "utf-8" _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = True # deprecated _lowerCAmelCase = None # deprecated _lowerCAmelCase = 1_0 << 2_0 # 10MB _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = JsonConfig def _a ( self : int ): """simple docstring""" if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) A_ : List[Any] = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def _a ( self : Any , _lowerCamelCase : List[str] ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : List[str] = [files] A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : Tuple = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : int = [files] A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : int , _lowerCamelCase : pa.Table ): """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : int ): """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : int = json.load(_lowerCamelCase ) # We keep only the field we are interested in A_ : List[str] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_lowerCamelCase , 
(list, tuple) ): A_ : int = set().union(*[row.keys() for row in dataset] ) A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} else: A_ : Tuple = dataset A_ : Dict = pa.Table.from_pydict(_lowerCamelCase ) yield file_idx, self._cast_table(_lowerCamelCase ) # If the file has one json object per line else: with open(_lowerCamelCase , '''rb''' ) as f: A_ : int = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ : int = max(self.config.chunksize // 32 , 16 << 10 ) A_ : int = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A_ : Any = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_lowerCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' ) try: while True: try: A_ : List[Any] = paj.read_json( io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_lowerCamelCase , pa.ArrowInvalid ) and "straddling" not in str(_lowerCamelCase ) or block_size > len(_lowerCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : Optional[Any] = json.load(_lowerCamelCase ) except json.JSONDecodeError: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON try: A_ : Optional[int] = set().union(*[row.keys() for row in dataset] ) A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} A_ : int = pa.Table.from_pydict(_lowerCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(_lowerCamelCase ) break else: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError( f'Not able to read records in the JSON file at {file}. ' f'You should probably indicate the field of the JSON file containing your records. ' f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) batch_idx += 1
4
1
'''simple docstring'''
import pprint

import requests

API_ENDPOINT_URL = """https://zenquotes.io/api"""


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '''/today''').json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '''/random''').json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
4
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCamelCase_ (a__, a__ ): """simple docstring""" _lowerCAmelCase = 'swin' _lowerCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Optional[int] = image_size A_ : Optional[int] = patch_size A_ : Optional[int] = num_channels A_ : Any = embed_dim A_ : List[Any] = depths A_ : Any = len(_lowerCamelCase ) A_ : List[Any] = num_heads A_ : Tuple = window_size A_ : Tuple = mlp_ratio A_ : Dict = qkv_bias A_ : List[str] = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Any = drop_path_rate A_ : List[Any] = hidden_act A_ : Tuple = use_absolute_embeddings A_ : int = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) ) A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : str ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return 1E-4
4
1
'''simple docstring'''


def perfect_cube(n: int) -> bool:
    # Round the cube root to the nearest integer to guard against
    # floating-point error (e.g. 27 ** (1 / 3) == 3.0000000000000004).
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
4
'''simple docstring'''
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{two_pointer([2, 7, 11, 15], 9) = }')
4
1
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    """simple docstring"""

    def __init__(self):
        """simple docstring"""
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('''bits''', self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        """simple docstring"""
        # watermarking cannot be applied to images smaller than 256px
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, '''dwtDct''') for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
4
'''simple docstring'''


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid only if no already-colored neighbour uses it
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
4
1
'''simple docstring'''


class Node:
    """simple docstring"""

    def __init__(self, val):
        """simple docstring"""
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """simple docstring"""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
'''simple docstring'''
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('''RGB''', (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("""out.gif""", save_all=True, append_images=images[1:])
4
1
'''simple docstring''' import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING snake_case__ = logging.get_logger(__name__) class UpperCamelCase_ (enum.Enum ): """simple docstring""" _lowerCAmelCase = 0 _lowerCAmelCase = 1 @add_end_docstrings(a__ ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'generated' def __init__( self : Optional[Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : List[str] ): """simple docstring""" super().__init__(*_lowerCamelCase , **_lowerCamelCase ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def _a ( self : Dict , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[int]=None , **_lowerCamelCase : int , ): """simple docstring""" A_ : Union[str, Any] = {} if truncation is not None: A_ : str = truncation A_ : Dict = generate_kwargs A_ : Any = {} if return_tensors is not None and return_type is None: A_ : str = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: A_ : str = return_type if clean_up_tokenization_spaces is not None: A_ : Tuple = clean_up_tokenization_spaces if stop_sequence is not None: A_ : Optional[int] = self.tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) if len(_lowerCamelCase ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) A_ : Any = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def _a ( self : str , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" return True def _a ( self : int , *_lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" A_ : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else '''''' if isinstance(args[0] , _lowerCamelCase ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) A_ : Optional[int] = ([prefix + arg for arg in args[0]],) A_ : List[str] = True elif isinstance(args[0] , _lowerCamelCase ): A_ : str = (prefix + args[0],) A_ : str = False else: raise ValueError( f' `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`' ) A_ : Dict = self.tokenizer(*_lowerCamelCase , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : str , *_lowerCamelCase : int , **_lowerCamelCase : List[Any] ): """simple docstring""" A_ : str = super().__call__(*_lowerCamelCase , **_lowerCamelCase ) if ( isinstance(args[0] , _lowerCamelCase ) and all(isinstance(_lowerCamelCase , _lowerCamelCase ) for el in args[0] ) and all(len(_lowerCamelCase ) == 1 for res in result ) ): return [res[0] for res in result] return result def _a ( self : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **_lowerCamelCase : List[str] ): """simple docstring""" A_ : str = self._parse_and_tokenize(_lowerCamelCase , truncation=_lowerCamelCase , **_lowerCamelCase ) return inputs def _a ( self : Any , _lowerCamelCase : Tuple , **_lowerCamelCase : Optional[Any] ): """simple docstring""" if self.framework == "pt": A_ ,A_ : Dict = model_inputs['''input_ids'''].shape elif self.framework == "tf": A_ ,A_ : Any = tf.shape(model_inputs['''input_ids'''] ).numpy() A_ : Optional[int] = generate_kwargs.get('''min_length''' , self.model.config.min_length ) A_ : int = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(_lowerCamelCase , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) A_ : int = self.model.generate(**_lowerCamelCase , **_lowerCamelCase ) A_ : Optional[int] = output_ids.shape[0] if self.framework == "pt": A_ : Optional[Any] = output_ids.reshape(_lowerCamelCase , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": A_ : Any = tf.reshape(_lowerCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int=ReturnType.TEXT , _lowerCamelCase : Any=False ): """simple docstring""" A_ : List[str] = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: A_ : Union[str, Any] = {f'{self.return_name}_token_ids': output_ids} elif return_type == ReturnType.TEXT: A_ : Optional[int] = { f'{self.return_name}_text': self.tokenizer.decode( _lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase , ) } records.append(_lowerCamelCase ) return records @add_end_docstrings(a__ ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'summary' def __call__( self : List[str] , *_lowerCamelCase : Tuple , **_lowerCamelCase : str ): """simple docstring""" return super().__call__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" if max_length < min_length: logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.' ) if input_length < max_length: logger.warning( f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is ' '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' f'consider decreasing max_length manually, e.g. 
summarizer(\'...\', max_length={input_length//2})' ) @add_end_docstrings(a__ ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'translation' def _a ( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" if input_length > 0.9 * max_length: logger.warning( f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ' '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' ) return True def _a ( self : int , *_lowerCamelCase : List[Any] , _lowerCamelCase : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Union[str, Any]=None ): """simple docstring""" if getattr(self.tokenizer , '''_build_translation_inputs''' , _lowerCamelCase ): return self.tokenizer._build_translation_inputs( *_lowerCamelCase , return_tensors=self.framework , truncation=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase ) else: return super()._parse_and_tokenize(*_lowerCamelCase , truncation=_lowerCamelCase ) def _a ( self : int , _lowerCamelCase : Tuple=None , _lowerCamelCase : Optional[int]=None , **_lowerCamelCase : Any ): """simple docstring""" A_ ,A_ ,A_ : Optional[Any] = super()._sanitize_parameters(**_lowerCamelCase ) if src_lang is not None: A_ : Dict = src_lang if tgt_lang is not None: A_ : Dict = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. A_ : str = kwargs.get('''task''' , self.task ) A_ : Optional[int] = task.split('''_''' ) if task and len(_lowerCamelCase ) == 4: # translation, XX, to YY A_ : List[Any] = items[1] A_ : Any = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : Tuple , *_lowerCamelCase : Dict , **_lowerCamelCase : Optional[int] ): """simple docstring""" return super().__call__(*_lowerCamelCase , **_lowerCamelCase )
4
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Any = tempfile.mkdtemp() A_ : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) A_ : Tuple = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict , **_lowerCamelCase : Tuple ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ): """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ): """simple docstring""" return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self : int ): """simple docstring""" A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self : int ): """simple docstring""" A_ : Tuple = self.get_tokenizer() A_ : Tuple = self.get_rust_tokenizer() A_ : Dict = self.get_image_processor() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) A_ : List[str] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.get_image_processor() A_ : Any = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Any = self.prepare_image_inputs() A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' ) A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self : Dict ): """simple docstring""" A_ : str = self.get_image_processor() A_ : List[str] = self.get_tokenizer() A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : int = '''lower newer''' A_ : str = processor(text=_lowerCamelCase ) A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self : str ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = '''lower newer''' A_ : Optional[int] = self.prepare_image_inputs() A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = self.get_image_processor() A_ : Optional[int] = self.get_tokenizer() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : str = processor.batch_decode(_lowerCamelCase ) A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : str = self.get_image_processor() A_ : Tuple = self.get_tokenizer() A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = '''lower newer''' A_ : List[str] = self.prepare_image_inputs() A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
4
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple=7 , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : Tuple=18 , _lowerCamelCase : Optional[int]=30 , _lowerCamelCase : int=400 , _lowerCamelCase : Tuple=True , _lowerCamelCase : Any=None , _lowerCamelCase : int=True , ): """simple docstring""" A_ : str = size if size is not None else {'''height''': 18, '''width''': 18} A_ : str = parent A_ : str = batch_size A_ : List[Any] = num_channels A_ : Optional[Any] = image_size A_ : List[str] = min_resolution A_ : Tuple = max_resolution A_ : Dict = do_resize A_ : Optional[Any] = size A_ : Union[str, Any] = apply_ocr def _a ( self : List[str] ): """simple docstring""" return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _a ( self : str ): """simple docstring""" A_ : int = LayoutLMvaImageProcessingTester(self ) @property def _a ( self : Any ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''apply_ocr''' ) ) def _a ( self : Optional[int] ): """simple docstring""" A_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} ) A_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) def _a ( self : Optional[int] ): """simple docstring""" pass def _a ( self : Any ): """simple docstring""" A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , Image.Image ) # Test not batched input A_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) self.assertIsInstance(encoding.words , _lowerCamelCase ) self.assertIsInstance(encoding.boxes , _lowerCamelCase ) # Test batched A_ : Optional[int] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , 
) def _a ( self : int ): """simple docstring""" A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , np.ndarray ) # Test not batched input A_ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched A_ : Any = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase ) for image in image_inputs: self.assertIsInstance(_lowerCamelCase , torch.Tensor ) # Test not batched input A_ : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched A_ : List[str] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) def _a ( self : str ): """simple docstring""" A_ : Optional[int] = LayoutLMvaImageProcessor() from datasets import load_dataset A_ : Tuple = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' ) A_ : Dict = Image.open(ds[0]['''file'''] ).convert('''RGB''' ) A_ : Dict = image_processing(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 A_ : Any = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', 
'''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231 A_ : Optional[int] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 
484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , _lowerCamelCase ) self.assertListEqual(encoding.boxes , _lowerCamelCase ) # with apply_OCR = False A_ : Dict = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase ) A_ : Dict = image_processing(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
4
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = """▁""" snake_case__ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", } snake_case__ = { """vocab_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json""" ), }, """spm_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model""" ) }, } snake_case__ = { """facebook/s2t-small-librispeech-asr""": 10_24, } snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""] snake_case__ = {"""mustc""": MUSTC_LANGS} class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = MAX_MODEL_INPUT_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = [] def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) A_ : Optional[int] = do_upper_case A_ : Tuple = do_lower_case A_ : Tuple = load_json(_lowerCamelCase ) A_ : Tuple = {v: k for k, v in self.encoder.items()} A_ : List[Any] = spm_file A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs ) if lang_codes is not None: A_ : Any = lang_codes A_ : Optional[Any] = LANGUAGES[lang_codes] A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs] A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs} A_ : Optional[int] = self.lang_tokens A_ : int = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: A_ : Dict = {} @property def _a ( self : Tuple ): """simple docstring""" return len(self.encoder ) @property def _a ( self : int ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def _a ( self : List[str] , _lowerCamelCase : Any ): """simple docstring""" A_ : int = new_tgt_lang self.set_tgt_lang_special_tokens(_lowerCamelCase ) def _a ( self : Tuple , _lowerCamelCase : str ): """simple docstring""" A_ : List[str] = self.lang_code_to_id[tgt_lang] A_ : Optional[Any] = [lang_code_id] def _a ( self : Optional[Any] , _lowerCamelCase : str ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def _a ( self : List[Any] , _lowerCamelCase : int ): """simple docstring""" return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] ) def 
_a ( self : int , _lowerCamelCase : int ): """simple docstring""" return self.decoder.get(_lowerCamelCase , self.unk_token ) def _a ( self : int , _lowerCamelCase : List[str] ): """simple docstring""" A_ : List[Any] = [] A_ : Any = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " A_ : Optional[Any] = [] else: current_sub_tokens.append(_lowerCamelCase ) A_ : Tuple = self.sp_model.decode(_lowerCamelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) A_ : Tuple = [1] * len(self.prefix_tokens ) A_ : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def _a ( self : Dict ): """simple docstring""" A_ : Union[str, Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.__dict__.copy() A_ : List[Any] = None return state def __setstate__( self : List[str] , _lowerCamelCase : Dict ): """simple docstring""" A_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A_ : Optional[int] = {} A_ : int = load_spm(self.spm_file , self.sp_model_kwargs ) def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" A_ : Dict = Path(_lowerCamelCase ) assert save_dir.is_dir(), f'{save_directory} should be a directory' A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(_lowerCamelCase , '''wb''' ) as fi: A_ : List[str] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (str(_lowerCamelCase ), str(_lowerCamelCase )) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ ) spm.Load(str(lowerCamelCase__ ) ) return spm def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]: with open(lowerCamelCase__ , '''r''' ) as f: return json.load(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any , 
path : str ) -> None: with open(path , '''w''' ) as f: json.dump(lowerCamelCase__ , f , indent=2 )
4
1
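A minimal usage sketch of the tokenizer above, assuming `transformers` and `sentencepiece` are installed and the hub checkpoint is reachable; the input text and printed output are illustrative only.

from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("hello world").input_ids  # subword ids followed by the </s> id
print(tokenizer.decode(ids, skip_special_tokens=True))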
'''simple docstring'''
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Default to symmetry: as many decoder layers as encoder layers.
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
4
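To make the `feed_forward_proj` parsing in the config above concrete, a short sketch; the attribute names (`dense_act_fn`, `is_gated_act`) follow the class as repaired above.

from transformers import T5Config

config = T5Config(feed_forward_proj="gated-gelu")
print(config.dense_act_fn, config.is_gated_act)  # gelu_new True ("gated-gelu" is remapped for backward compatibility)

config = T5Config(feed_forward_proj="relu")
print(config.dense_act_fn, config.is_gated_act)  # relu False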
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 snake_case__ = sys.version_info >= (3, 10) def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' _lowerCAmelCase = 4_2 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = BasicEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[1, 2, 3] ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field() _lowerCAmelCase = field() _lowerCAmelCase = field() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = field() _lowerCAmelCase = None _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A_ : Union[str, 
Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : int = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) A_ : Dict = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : Any = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', 
'''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = HfArgumentParser(_lowerCamelCase ) A_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A_ : int = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _a ( self : Optional[int] ): """simple docstring""" @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" A_ : List[str] = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : int = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) A_ : Tuple = [OptionalExample] if 
is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : int = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) ) A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Dict = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Union[str, Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0] A_ : str = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Any = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] A_ : Optional[Any] = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = 
os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] A_ : int = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
4
1
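The dataclass-to-CLI pattern those parser tests exercise, as a self-contained sketch; the `ExampleArgs` dataclass is hypothetical, modeled on the test fixtures above.

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class ExampleArgs:  # hypothetical dataclass, mirroring the fixtures above
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


parser = HfArgumentParser(ExampleArgs)
(args,) = parser.parse_args_into_dataclasses(["--foo", "7"])
print(args.foo, args.baz)  # 7 toto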
'''simple docstring''' import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @parameterized.expand([(None,), ('''foo.json''',)] ) def _a ( self : Tuple , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = GenerationConfig( do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_lowerCamelCase , config_name=_lowerCamelCase ) A_ : Optional[Any] = GenerationConfig.from_pretrained(_lowerCamelCase , config_name=_lowerCamelCase ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , _lowerCamelCase ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = AutoConfig.from_pretrained('''gpt2''' ) A_ : Optional[Any] = GenerationConfig.from_model_config(_lowerCamelCase ) A_ : Optional[Any] = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(_lowerCamelCase , _lowerCamelCase ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Tuple = GenerationConfig() A_ : Optional[Any] = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } A_ : List[Any] = copy.deepcopy(_lowerCamelCase ) A_ : int = generation_config.update(**_lowerCamelCase ) # update_kwargs was not modified (no side effects) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(_lowerCamelCase , {'''foo''': '''bar'''} ) def _a ( self : Dict ): """simple docstring""" A_ : Tuple = GenerationConfig() A_ : Any = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir: generation_config.save_pretrained(_lowerCamelCase ) A_ : Tuple = GenerationConfig.from_pretrained(_lowerCamelCase ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''' ) A_ : int = GenerationConfig.from_model_config(_lowerCamelCase ) assert not hasattr(_lowerCamelCase , '''foo''' ) # no new kwargs should be initialized if from config def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , _lowerCamelCase ) self.assertEqual(default_config.num_beams , 1 ) A_ : Dict = GenerationConfig( do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], 
[4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , _lowerCamelCase ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_lowerCamelCase ) A_ : int = GenerationConfig.from_pretrained(_lowerCamelCase , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , _lowerCamelCase ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : List[str] ): """simple docstring""" A_ : Optional[int] = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : Optional[Any] ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-generation-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' ) except HTTPError: pass def _a ( self : Any ): """simple docstring""" A_ : List[str] = GenerationConfig( do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token ) A_ : List[Any] = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _lowerCamelCase , repo_id='''test-generation-config''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Optional[Any] = GenerationConfig( do_sample=_lowerCamelCase , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token ) A_ : Dict = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Union[str, Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
4
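The save/load round trip that the test above checks, as a runnable sketch; the sampling values are illustrative.

import tempfile

from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)
print(loaded.do_sample, loaded.temperature)  # True 0.7; unspecified fields keep their defaults (e.g. top_k == 50)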
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) 
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
1
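The subfolder-loading behaviour tested above, in isolation (network access assumed; the repo id is the one the test itself uses):

from transformers import AutoImageProcessor

# Without `subfolder` this raises, because the processor config lives under feature_extractor/.
image_processor = AutoImageProcessor.from_pretrained(
    "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
)
print(type(image_processor).__name__)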
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING snake_case__ = logging.get_logger(__name__) @add_end_docstrings(a__ ) class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : List[str] , *_lowerCamelCase : Any , **_lowerCamelCase : Optional[int] ): """simple docstring""" super().__init__(*_lowerCamelCase , **_lowerCamelCase ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def _a ( self : Optional[int] , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Tuple=None ): """simple docstring""" A_ : Optional[Any] = {} A_ : Optional[int] = {} if prompt is not None: A_ : List[Any] = prompt if generate_kwargs is not None: A_ : int = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: A_ : Dict = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) A_ : int = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Tuple , _lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_lowerCamelCase : List[str] ): """simple docstring""" return super().__call__(_lowerCamelCase , **_lowerCamelCase ) def _a ( self : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[int]=None ): """simple docstring""" A_ : List[Any] = load_image(_lowerCamelCase ) if prompt is not None: if not isinstance(_lowerCamelCase , _lowerCamelCase ): raise ValueError( f'Received an invalid text input, got - {type(_lowerCamelCase )} - but expected a single string. 
' '''Note also that one single text can be provided for conditional image to text generation.''' ) A_ : Any = self.model.config.model_type if model_type == "git": A_ : Any = self.image_processor(images=_lowerCamelCase , return_tensors=self.framework ) A_ : List[str] = self.tokenizer(text=_lowerCamelCase , add_special_tokens=_lowerCamelCase ).input_ids A_ : Dict = [self.tokenizer.cls_token_id] + input_ids A_ : int = torch.tensor(_lowerCamelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": A_ : Tuple = self.image_processor(images=_lowerCamelCase , header_text=_lowerCamelCase , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation A_ : Union[str, Any] = self.image_processor(images=_lowerCamelCase , return_tensors=self.framework ) A_ : Tuple = self.tokenizer(_lowerCamelCase , return_tensors=self.framework ) model_inputs.update(_lowerCamelCase ) else: raise ValueError(f'Model type {model_type} does not support conditional text generation' ) else: A_ : List[str] = self.image_processor(images=_lowerCamelCase , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: A_ : int = None return model_inputs def _a ( self : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int=None ): """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''] , _lowerCamelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): A_ : Optional[int] = None if generate_kwargs is None: A_ : List[Any] = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. A_ : Optional[int] = model_inputs.pop(self.model.main_input_name ) A_ : Union[str, Any] = self.model.generate(_lowerCamelCase , **_lowerCamelCase , **_lowerCamelCase ) return model_outputs def _a ( self : Tuple , _lowerCamelCase : str ): """simple docstring""" A_ : Any = [] for output_ids in model_outputs: A_ : List[Any] = { '''generated_text''': self.tokenizer.decode( _lowerCamelCase , skip_special_tokens=_lowerCamelCase , ) } records.append(_lowerCamelCase ) return records
4
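A hedged usage sketch of the pipeline defined above; the checkpoint and image URL are illustrative, and any image-to-text checkpoint on the hub should work in their place.

from transformers import pipeline

captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
outputs = captioner(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    max_new_tokens=20,  # routed into `generate_kwargs` by the parameter-sanitizing logic above
)
print(outputs)  # e.g. [{'generated_text': '...'}]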
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Binarize the logits into a 0/1 mask, then render it as an image.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
4
1
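The same checkpoint can be driven directly through `transformers`, without the tool wrapper; a sketch using a blank stand-in image and an illustrative label.

import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.new("RGB", (352, 352))  # stand-in for a real photo
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # per-pixel mask logits for the (label, image) pair
print(logits.shape)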
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() snake_case__ = logging.get_logger("""transformers.models.encodec""") snake_case__ = { """quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""", """quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""", """quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""", """quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""", } snake_case__ = { """encoder.model.0.conv.conv""": """encoder.layers.0.conv""", """encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""", """encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""", """encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""", """encoder.model.3.conv.conv""": """encoder.layers.3.conv""", """encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""", """encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""", """encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""", """encoder.model.6.conv.conv""": """encoder.layers.6.conv""", """encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""", """encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""", """encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""", """encoder.model.9.conv.conv""": """encoder.layers.9.conv""", """encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""", """encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""", """encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""", """encoder.model.12.conv.conv""": """encoder.layers.12.conv""", """encoder.model.13.lstm""": """encoder.layers.13.lstm""", """encoder.model.15.conv.conv""": """encoder.layers.15.conv""", } snake_case__ = { """encoder.model.0.conv.norm""": """encoder.layers.0.norm""", """encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""", """encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""", """encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""", """encoder.model.3.conv.norm""": """encoder.layers.3.norm""", """encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""", """encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""", """encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""", """encoder.model.6.conv.norm""": """encoder.layers.6.norm""", """encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""", """encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""", """encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""", """encoder.model.9.conv.norm""": """encoder.layers.9.norm""", """encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""", """encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""", """encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""", """encoder.model.12.conv.norm""": 
"""encoder.layers.12.norm""", """encoder.model.15.conv.norm""": """encoder.layers.15.norm""", } snake_case__ = { """decoder.model.0.conv.conv""": """decoder.layers.0.conv""", """decoder.model.1.lstm""": """decoder.layers.1.lstm""", """decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""", """decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""", """decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""", """decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""", """decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""", """decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""", """decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""", """decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""", """decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""", """decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""", """decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""", """decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""", """decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""", """decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""", """decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""", """decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""", """decoder.model.15.conv.conv""": """decoder.layers.15.conv""", } snake_case__ = { """decoder.model.0.conv.norm""": """decoder.layers.0.norm""", """decoder.model.3.convtr.norm""": """decoder.layers.3.norm""", """decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""", """decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""", """decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""", """decoder.model.6.convtr.norm""": """decoder.layers.6.norm""", """decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""", """decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""", """decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""", """decoder.model.9.convtr.norm""": """decoder.layers.9.norm""", """decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""", """decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""", """decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""", """decoder.model.12.convtr.norm""": """decoder.layers.12.norm""", """decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""", """decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""", """decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""", """decoder.model.15.conv.norm""": """decoder.layers.15.norm""", } snake_case__ = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } snake_case__ = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } snake_case__ = [] snake_case__ = [] def snake_case__ ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Dict: for attribute in key.split('''.''' ): A_ : Tuple = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: A_ : Tuple = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: A_ : Union[str, Any] = hf_pointer.shape if hf_shape != 
value.shape: raise ValueError( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": A_ : Union[str, Any] = value elif weight_type == "weight_g": A_ : str = value elif weight_type == "weight_v": A_ : Tuple = value elif weight_type == "bias": A_ : Any = value elif weight_type == "running_mean": A_ : str = value elif weight_type == "running_var": A_ : Union[str, Any] = value elif weight_type == "num_batches_tracked": A_ : Tuple = value elif weight_type == "weight_ih_l0": A_ : Any = value elif weight_type == "weight_hh_l0": A_ : Any = value elif weight_type == "bias_ih_l0": A_ : Any = value elif weight_type == "bias_hh_l0": A_ : Any = value elif weight_type == "weight_ih_l1": A_ : Any = value elif weight_type == "weight_hh_l1": A_ : Dict = value elif weight_type == "bias_ih_l1": A_ : Union[str, Any] = value elif weight_type == "bias_hh_l1": A_ : Tuple = value else: A_ : str = value logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' ) def snake_case__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple ) -> int: for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: A_ ,A_ : Optional[Any] = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] ) -> Optional[Any]: A_ : Optional[Any] = [] if model_name == "encodec_24khz" or "encodec_32khz": A_ : str = MAPPING_24K elif model_name == "encodec_48khz": A_ : List[Any] = MAPPING_48K else: raise ValueError(f'Unsupported model: {model_name}' ) for name, value in orig_dict.items(): if should_ignore(lowerCamelCase__ , lowerCamelCase__ ): logger.info(f'{name} was ignored' ) continue A_ : Any = False for key, mapped_key in MAPPING.items(): if "*" in key: A_ ,A_ : int = key.split('''.*.''' ) if prefix in name and suffix in name: A_ : Tuple = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue A_ : List[str] = True if "*" in mapped_key: A_ : Union[str, Any] = name.split(lowerCamelCase__ )[0].split('''.''' )[-2] A_ : List[str] = mapped_key.replace('''*''' , lowerCamelCase__ ) if "weight_g" in name: A_ : int = '''weight_g''' elif "weight_v" in name: A_ : List[Any] = '''weight_v''' elif "weight_ih_l0" in name: A_ : Dict = '''weight_ih_l0''' elif "weight_hh_l0" in name: A_ : Union[str, Any] = '''weight_hh_l0''' elif "bias_ih_l0" in name: A_ : List[str] = '''bias_ih_l0''' elif "bias_hh_l0" in name: A_ : Any = '''bias_hh_l0''' elif "weight_ih_l1" in name: A_ : Any = '''weight_ih_l1''' elif "weight_hh_l1" in name: A_ : Optional[int] = '''weight_hh_l1''' elif "bias_ih_l1" in name: A_ : Optional[Any] = '''bias_ih_l1''' elif "bias_hh_l1" in name: A_ : Any = '''bias_hh_l1''' elif "bias" in name: A_ : Optional[int] = '''bias''' elif "weight" in name: A_ : List[str] = '''weight''' elif "running_mean" in name: A_ : Optional[Any] = '''running_mean''' elif "running_var" in name: A_ : List[Any] = '''running_var''' elif "num_batches_tracked" in name: A_ : Any = '''num_batches_tracked''' else: A_ : str = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue if not is_used: 
unused_weights.append(lowerCamelCase__ ) logger.warning(f'Unused weights: {unused_weights}' ) @torch.no_grad() def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None , ) -> Optional[Any]: if config_path is not None: A_ : List[str] = EncodecConfig.from_pretrained(lowerCamelCase__ ) else: A_ : Dict = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": A_ : str = [8, 5, 4, 4] A_ : List[Any] = [2.2] A_ : str = 6_4 A_ : Any = 3_2_0_0_0 A_ : Optional[int] = 2_0_4_8 A_ : List[Any] = False A_ : List[str] = False A_ : Dict = False elif model_name == "encodec_48khz": A_ : Dict = [8, 5, 4, 2] A_ : Any = [3.0, 6.0, 12.0, 24.0] A_ : Dict = 4_8_0_0_0 A_ : List[Any] = 2 A_ : List[Any] = False A_ : Union[str, Any] = '''time_group_norm''' A_ : Optional[Any] = True A_ : Optional[Any] = 1.0 A_ : Dict = 0.01 else: raise ValueError(f'Unknown model name: {model_name}' ) A_ : Optional[Any] = EncodecModel(lowerCamelCase__ ) A_ : Union[str, Any] = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(lowerCamelCase__ ) A_ : Optional[Any] = torch.load(lowerCamelCase__ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights A_ : Tuple = original_checkpoint['''best_state'''] recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--model""", default="""encodec_24khz""", type=str, help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""", ) parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) snake_case__ = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
4
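To pin down the wildcard semantics of the ignore-key helper in the conversion script above (its name is mangled in this dump; upstream it is `should_ignore`, which these checks assume is in scope), a few illustrative assertions:

assert should_ignore("quantizer.vq.layers.0._codebook.embed", ["quantizer.*"])      # trailing "*": pure prefix match
assert should_ignore("encoder.model.1.block.1.conv.conv", ["encoder.*.conv.conv"])  # infix "*": prefix and suffix must both occur
assert not should_ignore("decoder.model.0.conv.conv", ["encoder.*"])                # no rule matches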
'''simple docstring'''
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the largest sum over the (optionally empty) contiguous subarrays of `arr`."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Kadane's algorithm: either extend the running subarray or restart at `num`.
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
4
1
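A worked check of the Kadane implementation above (assuming `max_subarray_sum` from the repaired snippet is in scope): for [-2, 1, -3, 4, -1, 2, 1, -5, 4] the best subarray is [4, -1, 2, 1], summing to 6.

assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subarray_sum([-3, -1, -2]) == -1  # all-negative input: best single element
assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0  # the empty subarray wins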
'''simple docstring''' # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int]=0 ) -> str: # Format the message. if name is None: A_ : Any = None else: A_ : int = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(5_0 - spaces ) + '''s}''' A_ : Tuple = fmt.format(lowerCamelCase__ ) # Print and recurse (if needed). if isinstance(lowerCamelCase__ , lowerCamelCase__ ): if msg is not None: print(lowerCamelCase__ ) for k in val.keys(): recursive_print(lowerCamelCase__ , val[k] , spaces + 2 ) elif isinstance(lowerCamelCase__ , torch.Tensor ): print(lowerCamelCase__ , ''':''' , val.size() ) else: print(lowerCamelCase__ , ''':''' , lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] ) -> Tuple: # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace GPT2. 
A_ : Union[str, Any] = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] A_ : Optional[Any] = (num_heads, hidden_size, num_splits) + input_shape[1:] A_ : Dict = param.view(*lowerCamelCase__ ) A_ : str = param.transpose(0 , 2 ) A_ : Any = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] A_ : Union[str, Any] = (num_heads, num_splits, hidden_size) + input_shape[1:] A_ : str = param.view(*lowerCamelCase__ ) A_ : Any = param.transpose(0 , 1 ).contiguous() A_ : Any = param.view(*lowerCamelCase__ ) return param def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] ) -> Optional[Any]: # The converted output model. A_ : List[Any] = {} # old versions did not store training args A_ : str = input_state_dict.get('''args''' , lowerCamelCase__ ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) A_ : List[Any] = ds_args.padded_vocab_size A_ : Dict = ds_args.max_position_embeddings A_ : List[Any] = ds_args.hidden_size A_ : str = ds_args.num_layers A_ : Any = ds_args.num_attention_heads A_ : Any = ds_args.ffn_hidden_size # pprint(config) # The number of heads. A_ : int = config.n_head # The hidden_size per head. A_ : str = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): A_ : Union[str, Any] = input_state_dict['''checkpoint_version'''] else: A_ : Union[str, Any] = 0.0 # The model. A_ : Optional[int] = input_state_dict['''model'''] # The language model. A_ : str = model['''language_model'''] # The embeddings. A_ : str = lm['''embedding'''] # The word embeddings. A_ : Any = embeddings['''word_embeddings''']['''weight'''] # Truncate the embedding table to vocab_size rows. A_ : Union[str, Any] = word_embeddings[: config.vocab_size, :] A_ : Dict = word_embeddings # The position embeddings. A_ : List[Any] = embeddings['''position_embeddings''']['''weight'''] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] A_ : Tuple = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' ) # Store the position embeddings. A_ : str = pos_embeddings # The transformer. A_ : List[Any] = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder'''] # The regex to extract layer names. A_ : Optional[Any] = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' ) # The simple map of names for "automated" rules. A_ : Any = { '''attention.dense''': '''.attn.c_proj.''', '''self_attention.dense''': '''.attn.c_proj.''', '''mlp.dense_h_to_4h''': '''.mlp.c_fc.''', '''mlp.dense_4h_to_h''': '''.mlp.c_proj.''', } # Extract the layers. for key, val in transformer.items(): # Match the name. A_ : List[Any] = layer_re.match(lowerCamelCase__ ) # Stop if that's not a layer if m is None: break # The index of the layer. A_ : Dict = int(m.group(1 ) ) # The name of the operation. A_ : Dict = m.group(2 ) # Is it a weight or a bias? A_ : Dict = m.group(3 ) # The name of the layer. A_ : Tuple = f'transformer.h.{layer_idx}' # For layernorm(s), simply store the layer norm. 
if op_name.endswith('''layernorm''' ): A_ : Optional[int] = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2''' A_ : Union[str, Any] = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. A_ : Optional[Any] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , lowerCamelCase__ , lowerCamelCase__ ) A_ : Union[str, Any] = causal_mask # Insert a "dummy" tensor for masked_bias. A_ : Optional[int] = torch.tensor(-1e4 , dtype=torch.floataa ) A_ : int = masked_bias A_ : Dict = fix_query_key_value_ordering(lowerCamelCase__ , lowerCamelCase__ , 3 , lowerCamelCase__ , lowerCamelCase__ ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. A_ : str = out_val.transpose(0 , 1 ).contiguous() # Store. A_ : int = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": A_ : Optional[int] = fix_query_key_value_ordering(lowerCamelCase__ , lowerCamelCase__ , 3 , lowerCamelCase__ , lowerCamelCase__ ) # Store. No change of shape. A_ : List[Any] = out_val # Transpose the weights. elif weight_or_bias == "weight": A_ : List[str] = megatron_to_transformers[op_name] A_ : List[Any] = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": A_ : List[str] = megatron_to_transformers[op_name] A_ : Tuple = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. A_ : Tuple = transformer['''final_layernorm.weight'''] A_ : Union[str, Any] = transformer['''final_layernorm.bias'''] # For LM head, transformers' wants the matrix to weight embeddings. A_ : Dict = word_embeddings # It should be done! return output_state_dict def snake_case__ ( ) -> Any: # Create the argument parser. A_ : Tuple = argparse.ArgumentParser() parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' ) parser.add_argument( '''path_to_checkpoint''' , type=lowerCamelCase__ , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , ) parser.add_argument( '''--config_file''' , default='''''' , type=lowerCamelCase__ , help='''An optional config json file describing the pre-trained model.''' , ) A_ : Union[str, Any] = parser.parse_args() # Extract the basename. A_ : Optional[int] = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' ) if args.path_to_checkpoint.endswith('''.zip''' ): with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint: with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict: A_ : Optional[int] = torch.load(lowerCamelCase__ , map_location='''cpu''' ) else: A_ : Optional[Any] = torch.load(args.path_to_checkpoint , map_location='''cpu''' ) A_ : Optional[Any] = input_state_dict.get('''args''' , lowerCamelCase__ ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: A_ : Union[str, Any] = '''gelu_fast''' elif ds_args.openai_gelu: A_ : Optional[int] = '''gelu_new''' else: A_ : Any = '''gelu''' else: # in the very early days this used to be "gelu_new" A_ : Tuple = '''gelu_new''' # Spell out all parameters in case the defaults change. 
A_ : List[Any] = GPTaConfig( vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=lowerCamelCase__ , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=lowerCamelCase__ , summary_activation=lowerCamelCase__ , summary_proj_to_labels=lowerCamelCase__ , summary_first_dropout=0.1 , scale_attn_weights=lowerCamelCase__ , use_cache=lowerCamelCase__ , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , ) else: A_ : str = GPTaConfig.from_json_file(args.config_file ) A_ : Optional[int] = ['''GPT2LMHeadModel'''] # Convert. print('''Converting''' ) A_ : Union[str, Any] = convert_megatron_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(lowerCamelCase__ , lowerCamelCase__ ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: A_ : List[Any] = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": A_ : List[Any] = '''gpt2''' elif tokenizer_type == "PretrainedFromHF": A_ : Union[str, Any] = ds_args.tokenizer_name_or_path else: raise ValueError(f'Unrecognized tokenizer_type {tokenizer_type}' ) else: A_ : int = '''gpt2''' A_ : Any = AutoTokenizer.from_pretrained(lowerCamelCase__ ) A_ : Any = type(lowerCamelCase__ ).__name__ A_ : Optional[Any] = tokenizer_class # Store the config to file. print('''Saving config''' ) config.save_pretrained(lowerCamelCase__ ) # Save tokenizer based on args print(f'Adding {tokenizer_class} tokenizer files' ) tokenizer.save_pretrained(lowerCamelCase__ ) # Store the state_dict to file. A_ : int = os.path.join(lowerCamelCase__ , '''pytorch_model.bin''' ) print(f'Saving checkpoint to "{output_checkpoint_file}"' ) torch.save(lowerCamelCase__ , lowerCamelCase__ ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
4
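To make the permutation above concrete, here is a small torch sketch of the `checkpoint_version >= 2.0` branch of fix_query_key_value_ordering on made-up dimensions; it only verifies that rows regroup from head-major to split-major order:

import torch

num_heads, num_splits, head_dim = 2, 3, 4
rows = num_heads * num_splits * head_dim          # 24 fused QKV rows
param = torch.arange(rows * 8, dtype=torch.float32).view(rows, 8)

input_shape = param.size()
saved_shape = (num_heads, num_splits, head_dim) + input_shape[1:]
out = param.view(*saved_shape).transpose(0, 1).contiguous().view(*input_shape)

assert out.shape == input_shape
# Block (head h, split s) moves from row (h * num_splits + s) * head_dim
# to row (s * num_heads + h) * head_dim; spot-check h = 1, s = 1:
assert torch.equal(out[(1 * num_heads + 1) * head_dim], param[(1 * num_splits + 1) * head_dim])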
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
4
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


snake_case__ = {
    """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
    """processing_git""": ["""GitProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case__ = [
        """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GitForCausalLM""",
        """GitModel""",
        """GitPreTrainedModel""",
        """GitVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
4
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'table-transformer' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : str = backbone_config.get('''model_type''' ) A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] A_ : List[str] = config_class.from_dict(_lowerCamelCase ) # set timm attributes to None A_ ,A_ ,A_ : Union[str, Any] = None, None, None A_ : Optional[Any] = use_timm_backbone A_ : Optional[int] = backbone_config A_ : Optional[Any] = num_channels A_ : Dict = num_queries A_ : str = d_model A_ : List[str] = encoder_ffn_dim A_ : int = encoder_layers A_ : Optional[Any] = encoder_attention_heads A_ : List[str] = decoder_ffn_dim A_ : Any = decoder_layers A_ : List[str] = decoder_attention_heads A_ : Tuple = dropout A_ : Optional[Any] = attention_dropout A_ : Any = activation_dropout A_ : List[Any] = activation_function A_ : Dict = init_std A_ : Any = init_xavier_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : Any = encoder_layers A_ : List[str] = auxiliary_loss A_ : List[Any] = position_embedding_type A_ : Optional[Any] = backbone A_ : Tuple = use_pretrained_backbone A_ : List[Any] = dilation # Hungarian matcher A_ : List[str] = class_cost A_ : str = bbox_cost A_ : Union[str, Any] = giou_cost # Loss coefficients A_ : Any = mask_loss_coefficient A_ : Optional[int] = dice_loss_coefficient A_ : Dict = bbox_loss_coefficient A_ : int = giou_loss_coefficient A_ : int = eos_coefficient super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def _a ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def _a ( self : Any ): """simple docstring""" return self.d_model class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Tuple ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-5 @property def _a ( self : str ): """simple docstring""" return 12
4
1
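A short sketch of how the `attribute_map` declared above behaves at runtime, assuming a transformers installation that ships `TableTransformerConfig`: reading the mapped names transparently resolves to the underlying config fields.

from transformers import TableTransformerConfig

config = TableTransformerConfig(d_model=256, encoder_attention_heads=8)
assert config.hidden_size == 256          # aliased to d_model
assert config.num_attention_heads == 8    # aliased to encoder_attention_heads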
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) 
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple 
docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : 
Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase 
) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
4
1
'''simple docstring'''
from itertools import product

from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros


def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple ) -> Dict:
    A_ : Optional[int] = k_size // 2
    A_ ,A_ : Optional[int] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    A_ : int = 1 / (2 * pi * sigma) * exp(-(square(lowerCamelCase__ ) + square(lowerCamelCase__ )) / (2 * square(lowerCamelCase__ )) )
    return g


def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] ) -> Dict:
    A_ ,A_ : Optional[Any] = image.shape[0], image.shape[1]
    # dst image height and width
    A_ : int = height - k_size + 1
    A_ : int = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    A_ : Dict = zeros((dst_height * dst_width, k_size * k_size) )
    A_ : List[Any] = 0
    for i, j in product(range(lowerCamelCase__ ) , range(lowerCamelCase__ ) ):
        A_ : Any = ravel(image[i : i + k_size, j : j + k_size] )
        A_ : List[str] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    A_ : str = gen_gaussian_kernel(lowerCamelCase__ , lowerCamelCase__ )
    A_ : List[Any] = ravel(lowerCamelCase__ )
    # reshape and get the dst image
    A_ : List[str] = dot(lowerCamelCase__ , lowerCamelCase__ ).reshape(lowerCamelCase__ , lowerCamelCase__ ).astype(lowerCamelCase__ )
    return dst


if __name__ == "__main__":
    # read original image
    snake_case__ = imread(R"""../image_data/lena.jpg""")
    # turn image in gray scale value
    snake_case__ = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    snake_case__ = gaussian_filter(gray, 3, sigma=1)
    snake_case__ = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
    imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
    waitKey()
4
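A standalone look at the kernel builder above. Note that the kernel is not normalised to sum to one, so filtered images come out slightly darker; dividing by the sum, as sketched below, is a common follow-up step and an addition here, not something the script does:

from numpy import exp, mgrid, pi, square

k_size, sigma = 3, 1.0
center = k_size // 2
# evaluate the 2-D Gaussian on an integer grid centred at zero, as above
x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))

print(g.sum())               # less than 1 for this kernel
g_normalised = g / g.sum()   # optional normalisation so brightness is preserved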
'''simple docstring'''
import pprint

import requests

snake_case__ = """https://zenquotes.io/api"""


def snake_case__ ( ) -> list:
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()


def snake_case__ ( ) -> list:
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()


if __name__ == "__main__":
    snake_case__ = random_quotes()
    pprint.pprint(response)
4
1
'''simple docstring''' import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def snake_case__ ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : List[Any] ) -> List[str]: # load base model A_ : Dict = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors A_ : Tuple = load_file(lowerCamelCase__ ) A_ : Any = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: A_ : Tuple = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) A_ : List[str] = pipeline.text_encoder else: A_ : Tuple = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) A_ : List[Any] = pipeline.unet # find the target layer A_ : List[str] = layer_infos.pop(0 ) while len(lowerCamelCase__ ) > -1: try: A_ : List[Any] = curr_layer.__getattr__(lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: A_ : Optional[Any] = layer_infos.pop(0 ) elif len(lowerCamelCase__ ) == 0: break except Exception: if len(lowerCamelCase__ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: A_ : int = layer_infos.pop(0 ) A_ : Optional[int] = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(lowerCamelCase__ ) else: pair_keys.append(lowerCamelCase__ ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: A_ : Dict = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) A_ : Tuple = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowerCamelCase__ , lowerCamelCase__ ).unsqueeze(2 ).unsqueeze(3 ) else: A_ : Optional[int] = state_dict[pair_keys[0]].to(torch.floataa ) A_ : Optional[int] = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowerCamelCase__ , lowerCamelCase__ ) # update visited list for item in pair_keys: visited.append(lowerCamelCase__ ) return pipeline if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.7_5, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") snake_case__ = parser.parse_args() snake_case__ = args.base_model_path snake_case__ = args.checkpoint_path snake_case__ = args.dump_path snake_case__ = args.lora_prefix_unet snake_case__ = args.lora_prefix_text_encoder snake_case__ = args.alpha snake_case__ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) snake_case__ = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
4
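The heart of the merge above, isolated as a hedged torch sketch: each LoRA pair stores a low-rank update `up @ down`, scaled by `alpha`, which is added to the frozen weight in place. Shapes below are arbitrary demo values:

import torch

d_out, d_in, rank, alpha = 8, 6, 2, 0.75
weight = torch.randn(d_out, d_in)       # frozen base weight W0
lora_up = torch.randn(d_out, rank)      # "lora_up" matrix
lora_down = torch.randn(rank, d_in)     # "lora_down" matrix

merged = weight + alpha * (lora_up @ lora_down)
assert merged.shape == weight.shape
# the update itself has rank at most `rank`
assert torch.linalg.matrix_rank(lora_up @ lora_down) <= rank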
'''simple docstring'''
from __future__ import annotations


class UpperCamelCase_ :
    """simple docstring"""

    def __init__( self : Optional[int] , _lowerCamelCase : int ):
        """simple docstring"""
        A_ : Union[str, Any] = order
        # a_{0} ... a_{k}
        A_ : Union[str, Any] = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        A_ : int = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        A_ : str = [0.0] * self.order
        # y[n-1] ... y[n-k]
        A_ : Optional[Any] = [0.0] * self.order

    def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ):
        """simple docstring"""
        if len(_lowerCamelCase ) < self.order:
            A_ : Any = [1.0, *a_coeffs]
        if len(_lowerCamelCase ) != self.order + 1:
            A_ : List[Any] = (
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(_lowerCamelCase )}'
            )
            raise ValueError(_lowerCamelCase )
        if len(_lowerCamelCase ) != self.order + 1:
            A_ : Union[str, Any] = (
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(_lowerCamelCase )}'
            )
            raise ValueError(_lowerCamelCase )
        A_ : Tuple = a_coeffs
        A_ : str = b_coeffs

    def _a ( self : Tuple , _lowerCamelCase : float ):
        """simple docstring"""
        A_ : Any = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        A_ : Optional[Any] = self.input_history[:-1]
        A_ : List[str] = self.output_history[:-1]
        A_ : Tuple = sample
        A_ : Tuple = result
        return result
4
1
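Written out, the filter class above computes the direct-form difference equation y[n] = (b0*x[n] + sum_i b_i*x[n-i] - sum_i a_i*y[n-i]) / a0. A free-standing sketch of that recurrence (the class's method names are obfuscated here, so this is a restatement rather than a literal usage example):

def iir_process(a_coeffs, b_coeffs, samples):
    order = len(a_coeffs) - 1
    x_hist = [0.0] * order  # x[n-1] ... x[n-order]
    y_hist = [0.0] * order  # y[n-1] ... y[n-order]
    out = []
    for sample in samples:
        acc = sum(b_coeffs[i] * x_hist[i - 1] - a_coeffs[i] * y_hist[i - 1]
                  for i in range(1, order + 1))
        result = (acc + b_coeffs[0] * sample) / a_coeffs[0]
        x_hist = [sample] + x_hist[:-1]   # shift histories by one sample
        y_hist = [result] + y_hist[:-1]
        out.append(result)
    return out

# Identity filter (a = b = [1.0, 0.0]) passes the signal through unchanged:
assert iir_process([1.0, 0.0], [1.0, 0.0], [1.0, -2.0, 3.0]) == [1.0, -2.0, 3.0]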
'''simple docstring'''
from __future__ import annotations

from typing import Any


def snake_case__ ( lowerCamelCase__ : list ) -> int:
    if not postfix_notation:
        return 0
    A_ : List[str] = {'''+''', '''-''', '''*''', '''/'''}
    A_ : list[Any] = []
    for token in postfix_notation:
        if token in operations:
            A_ ,A_ : List[str] = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(lowerCamelCase__ ) )
    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
4
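A quick worked example of the evaluator above, focusing on its division branch: Python's `//` floors toward negative infinity, while the function truncates toward zero for mixed-sign operands. Standalone sketch:

def trunc_div(a, b):
    # floor division, corrected to truncate toward zero when signs differ
    if a * b < 0 and a % b != 0:
        return a // b + 1
    return a // b

assert trunc_div(7, -2) == -3   # plain // would give -4
assert trunc_div(-7, 2) == -3
assert trunc_div(6, 3) == 2

# ["2", "1", "+", "3", "*"] parses as (2 + 1) * 3:
# stack: [2] -> [2, 1] -> [3] -> [3, 3] -> [9]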
'''simple docstring'''
class UpperCamelCase_ :
    """simple docstring"""

    def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ):
        """simple docstring"""
        A_ : Union[str, Any] = val
        A_ : Tuple = None
        A_ : Any = None

    def _a ( self : Tuple , _lowerCamelCase : List[Any] ):
        """simple docstring"""
        if self.val:
            if val < self.val:
                if self.left is None:
                    A_ : int = Node(_lowerCamelCase )
                else:
                    self.left.insert(_lowerCamelCase )
            elif val > self.val:
                if self.right is None:
                    A_ : List[str] = Node(_lowerCamelCase )
                else:
                    self.right.insert(_lowerCamelCase )
        else:
            A_ : Any = val


def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> str:  # Recursive traversal
    if root:
        inorder(root.left , lowerCamelCase__ )
        res.append(root.val )
        inorder(root.right , lowerCamelCase__ )


def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple:  # Build BST
    if len(lowerCamelCase__ ) == 0:
        return arr
    A_ : Dict = Node(arr[0] )
    for i in range(1 , len(lowerCamelCase__ ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    A_ : Tuple = []
    inorder(lowerCamelCase__ , lowerCamelCase__ )
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
1
'''simple docstring'''
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class UpperCamelCase_ (a__, unittest.TestCase ):
    """simple docstring"""

    _lowerCAmelCase = CpmAntTokenizer
    _lowerCAmelCase = False

    def _a ( self : Union[str, Any] ):
        """simple docstring"""
        super().setUp()
        A_ : List[str] = [
            '''<d>''',
            '''</d>''',
            '''<s>''',
            '''</s>''',
            '''</_>''',
            '''<unk>''',
            '''<pad>''',
            '''</n>''',
            '''我''',
            '''是''',
            '''C''',
            '''P''',
            '''M''',
            '''A''',
            '''n''',
            '''t''',
        ]
        A_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

    @tooslow
    def _a ( self : Tuple ):
        """simple docstring"""
        A_ : Union[str, Any] = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
        A_ : Optional[Any] = '''今天天气真好!'''
        A_ : str = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
        A_ : int = tokenizer.tokenize(_lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        A_ : List[str] = '''今天天气真好!'''
        A_ : List[Any] = [tokenizer.bos_token] + tokens
        A_ : Any = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
        A_ : List[Any] = tokenizer.decode(_lowerCamelCase )
        self.assertEqual(_lowerCamelCase , _lowerCamelCase )
4
'''simple docstring'''
def snake_case__ ( lowerCamelCase__ : list ) -> list:
    if len(lowerCamelCase__ ) <= 1:
        return [tuple(lowerCamelCase__ )]
    A_ : List[str] = []

    def generate(lowerCamelCase__ : int , lowerCamelCase__ : list ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return
        generate(k - 1 , lowerCamelCase__ )
        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                A_ ,A_ : Optional[int] = arr[k - 1], arr[i]
            else:  # k is odd
                A_ ,A_ : Union[str, Any] = arr[k - 1], arr[0]
            generate(k - 1 , lowerCamelCase__ )

    generate(len(lowerCamelCase__ ) , lowerCamelCase__ )
    return res


if __name__ == "__main__":
    snake_case__ = input("""Enter numbers separated by a comma:\n""").strip()
    snake_case__ = [int(item) for item in user_input.split(""",""")]
    print(heaps(arr))
4
1
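A quick cross-check of Heap's algorithm as implemented above: it must emit every permutation exactly once. This standalone copy keeps the same swap rule (index `i` for even `k`, index 0 for odd `k`) and compares against `itertools.permutations`:

from itertools import permutations

def heaps(arr):
    res = []

    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr))
            return
        generate(k - 1, arr)
        for i in range(k - 1):
            if k % 2 == 0:                                  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:                                           # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res

out = heaps([1, 2, 3])
assert len(out) == 6 and set(out) == set(permutations([1, 2, 3]))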
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""", """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""", } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'roberta' def __init__( self : Any , _lowerCamelCase : Tuple=50265 , _lowerCamelCase : int=768 , _lowerCamelCase : Optional[Any]=12 , _lowerCamelCase : Tuple=12 , _lowerCamelCase : List[str]=3072 , _lowerCamelCase : Union[str, Any]="gelu" , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : Tuple=512 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : List[Any]=0.02 , _lowerCamelCase : Any=1E-12 , _lowerCamelCase : Optional[Any]=1 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Optional[Any]="absolute" , _lowerCamelCase : str=True , _lowerCamelCase : Optional[Any]=None , **_lowerCamelCase : Any , ): """simple docstring""" super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) A_ : Dict = vocab_size A_ : List[Any] = hidden_size A_ : str = num_hidden_layers A_ : int = num_attention_heads A_ : Dict = hidden_act A_ : List[str] = intermediate_size A_ : Optional[int] = hidden_dropout_prob A_ : Union[str, Any] = attention_probs_dropout_prob A_ : Union[str, Any] = max_position_embeddings A_ : Dict = type_vocab_size A_ : int = initializer_range A_ : int = layer_norm_eps A_ : Union[str, Any] = position_embedding_type A_ : Any = use_cache A_ : int = classifier_dropout class UpperCamelCase_ (a__ ): """simple docstring""" @property def _a ( self : Tuple ): """simple docstring""" if self.task == "multiple-choice": A_ : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: A_ : Optional[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
4
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: A_ : List[str] = TextStreamer(_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Dict = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : str = tokenizer.decode(greedy_ids[0] ) A_ : int = TextIteratorStreamer(_lowerCamelCase ) A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() A_ : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : List[str] = -1 A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : Tuple = greedy_ids[:, input_ids.shape[1] :] A_ : Tuple = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Any = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase ) A_ : List[Any] = -1 A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: A_ : 
List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token A_ : List[str] = cs.out[:-1] # Remove the final "\n" A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Union[str, Any] = -1 A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 ) A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCamelCase ): A_ : str = '''''' for new_text in streamer: streamer_text += new_text
4
1
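The pattern these tests exercise, as a hedged usage sketch: generation runs on a background thread while the main thread consumes decoded text from the streamer. The checkpoint name is the same tiny test model the file uses; swap in a real model for meaningful output.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

thread = Thread(target=model.generate,
                kwargs={**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer})
thread.start()
for chunk in streamer:          # yields text pieces as tokens are generated
    print(chunk, end="", flush=True)
thread.join()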
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Any: A_ : List[str] = OrderedDict() for key, value in state_dict.items(): if key.startswith('''module.encoder''' ): A_ : Dict = key.replace('''module.encoder''' , '''glpn.encoder''' ) if key.startswith('''module.decoder''' ): A_ : Dict = key.replace('''module.decoder''' , '''decoder.stages''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 A_ : Dict = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] A_ : Tuple = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(lowerCamelCase__ )-1}' ) if "norm" in key: A_ : Dict = key.replace('''norm''' , '''layer_norm''' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 A_ : Union[str, Any] = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )] A_ : Any = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(lowerCamelCase__ )-1}' ) if "layer_norm1" in key: A_ : Union[str, Any] = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: A_ : int = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 A_ : Any = key[key.find('''block''' ) + len('''block''' )] A_ : Optional[int] = key.replace(f'block{idx}' , f'block.{int(lowerCamelCase__ )-1}' ) if "attn.q" in key: A_ : Any = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: A_ : Optional[Any] = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: A_ : List[str] = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: A_ : Tuple = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: A_ : str = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: A_ : List[Any] = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: A_ : List[str] = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) A_ : str = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 A_ : Union[str, Any] = key[key.find('''linear_c''' ) + len('''linear_c''' )] A_ : int = key.replace(f'linear_c{idx}' , f'linear_c.{int(lowerCamelCase__ )-1}' ) if "bot_conv" in key: A_ : str = key.replace('''bot_conv''' , '''0.convolution''' ) if "skip_conv1" in key: A_ : Dict = key.replace('''skip_conv1''' , '''1.convolution''' ) if "skip_conv2" in key: A_ : Dict = key.replace('''skip_conv2''' , '''2.convolution''' ) if "fusion1" in key: A_ : Tuple = key.replace('''fusion1''' , '''1.fusion''' ) if "fusion2" in key: A_ : Dict = key.replace('''fusion2''' , '''2.fusion''' ) if "fusion3" in key: A_ : int = key.replace('''fusion3''' , '''3.fusion''' ) if "fusion" in key and "conv" in key: A_ : List[str] = key.replace('''conv''' , '''convolutional_layer''' ) if key.startswith('''module.last_layer_depth''' ): A_ : List[str] = key.replace('''module.last_layer_depth''' , '''head.head''' ) A_ : Tuple = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int ) -> Tuple: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in 
range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) A_ : List[Any] = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' ) A_ : List[str] = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' ) # next, add keys and values (in that order) to the state dict A_ : str = kv_weight[ : config.hidden_sizes[i], : ] A_ : Any = kv_bias[: config.hidden_sizes[i]] A_ : Any = kv_weight[ config.hidden_sizes[i] :, : ] A_ : List[str] = kv_bias[config.hidden_sizes[i] :] def snake_case__ ( ) -> Tuple: A_ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A_ : List[Any] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : int=False , lowerCamelCase__ : Union[str, Any]=None ) -> str: A_ : List[str] = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] ) # load image processor (only resize + rescale) A_ : List[Any] = GLPNImageProcessor() # prepare image A_ : Optional[int] = prepare_img() A_ : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).pixel_values logger.info('''Converting model...''' ) # load original state dict A_ : List[str] = torch.load(lowerCamelCase__ , map_location=torch.device('''cpu''' ) ) # rename keys A_ : Optional[int] = rename_keys(lowerCamelCase__ ) # key and value matrices need special treatment read_in_k_v(lowerCamelCase__ , lowerCamelCase__ ) # create HuggingFace model and load state dict A_ : Tuple = GLPNForDepthEstimation(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # forward pass A_ : Union[str, Any] = model(lowerCamelCase__ ) A_ : Optional[int] = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: A_ : Tuple = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: A_ : Union[str, Any] = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(f'Unknown model name: {model_name}' ) A_ : List[Any] = torch.Size([1, 4_8_0, 6_4_0] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) # finally, push to hub if required if push_to_hub: logger.info('''Pushing model and image processor to the hub...''' ) model.push_to_hub( repo_path_or_name=Path(lowerCamelCase__ , lowerCamelCase__ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=lowerCamelCase__ , ) image_processor.push_to_hub( repo_path_or_name=Path(lowerCamelCase__ , lowerCamelCase__ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=lowerCamelCase__ , ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case 
you're pushing to the hub.""", ) snake_case__ = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
4
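The read_in_k_v step in the GLPN conversion script above splits each fused key/value projection of the original checkpoint into separate key and value tensors. A minimal sketch of that slicing, using a made-up hidden size rather than the real GLPN config:

import torch

# Sketch of the fused key/value split performed by read_in_k_v above.
# hidden_size is a hypothetical value, not taken from the real GLPN config.
hidden_size = 64
kv_weight = torch.randn(2 * hidden_size, hidden_size)  # key and value rows stacked
kv_bias = torch.randn(2 * hidden_size)

key_weight, key_bias = kv_weight[:hidden_size, :], kv_bias[:hidden_size]
value_weight, value_bias = kv_weight[hidden_size:, :], kv_bias[hidden_size:]

assert key_weight.shape == value_weight.shape == (hidden_size, hidden_size)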
'''simple docstring'''
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APPROX-VERTEX-COVER: repeatedly pick the highest-degree vertex.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []
    # For each node, push its rank, the node and its adjacency list onto the queue.
    # heapq implements a min-priority queue, so -1 * len(value) makes it behave
    # like a max-priority queue.
    for key, value in graph.items():  # O(log(n)) per push
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # While the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # Extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # If the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # If argmax is reachable from elem, remove argmax from elem's
            # adjacency list and update elem's rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
4
1
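The vertex-cover sample above relies on heapq, which only implements a min-heap; storing -1 * degree makes the highest-degree vertex pop first. The trick in isolation:

import heapq

# heapq is a min-heap, so negating the key turns it into a max-heap.
degrees = {0: 2, 1: 2, 2: 3, 3: 3, 4: 2}
heap = [[-degree, node] for node, degree in degrees.items()]
heapq.heapify(heap)

neg_degree, node = heapq.heappop(heap)
assert (node, -neg_degree) == (2, 3)  # a maximum-degree vertex comes out first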
'''simple docstring'''
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401

warnings.warn(
    """The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
    """ StableDiffusionInpaintPipeline` instead."""
)
4
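The module above is a deprecation shim: it re-exports the class from its new home and warns at import time. The same pattern in a runnable, self-contained form (the module name legacy_json and the re-exported loads are illustrative, not part of diffusers):

# legacy_json.py -- hypothetical shim module demonstrating the re-export-and-warn pattern
import warnings

from json import loads as loads  # noqa: F401  (re-export under the old import path)

warnings.warn(
    "`legacy_json` is outdated. Please use `from json import loads` instead.",
    DeprecationWarning,
)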
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', 
F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]: A_ : Tuple = state_dict.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def snake_case__ ( lowerCamelCase__ : Dict ) -> Any: A_ : int = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) A_ : List[str] = value else: A_ : Optional[int] = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]: A_ : Any = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : str = in_proj_weight[:2_5_6, :] A_ : Optional[Any] = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : Tuple = in_proj_bias[2_5_6:5_1_2] A_ : Tuple = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : List[str] = in_proj_weight[:2_5_6, :] A_ : int = in_proj_bias[:2_5_6] A_ : Any = in_proj_weight[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias[2_5_6:5_1_2] A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :] A_ : Optional[Any] = in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention A_ : Tuple = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to 
the state dict A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :] A_ : Tuple = in_proj_bias_cross_attn[:2_5_6] A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2] A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :] A_ : Any = in_proj_bias_cross_attn[-2_5_6:] def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict: A_ ,A_ : int = image.size A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 A_ : Union[str, Any] = target_max_size / current_max_size A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case__ ( lowerCamelCase__ : Tuple ) -> str: A_ : Any = F.to_tensor(lowerCamelCase__ ) A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str: logger.info('''Converting model...''' ) # load original state dict A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : str = rename_backbone_keys(lowerCamelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCamelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[Any] = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): A_ : List[Any] = state_dict.pop(lowerCamelCase__ ) A_ : str = val # create HuggingFace model and load state dict A_ : Union[str, Any] = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: A_ : Dict = 1_5 A_ : Dict = 2 A_ : int = {0: '''table''', 1: '''table rotated'''} A_ : List[str] = idalabel A_ : Optional[int] = {v: k for k, v in idalabel.items()} else: A_ : Union[str, Any] = 1_2_5 A_ : Optional[Any] = 6 A_ : Optional[Any] = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } A_ : int = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} A_ : Optional[Any] = DetrImageProcessor( format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 ) A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # verify our conversion A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ ) A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' ) A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 ) A_ : str = model(lowerCamelCase__ ) if "detection" in checkpoint_url: A_ : str = (1, 1_5, 3) A_ : int = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) A_ : Tuple = 
torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: A_ : Optional[int] = (1, 1_2_5, 7) A_ : Dict = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) A_ : List[Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(lowerCamelCase__ ) image_processor.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
1
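The resize helper in the table-transformer sample above scales the longer image side down to 800 pixels for detection (1000 for structure recognition) while preserving the aspect ratio. A quick check of the arithmetic with PIL:

from PIL import Image

# Aspect-preserving resize, as in the resize() helper above.
image = Image.new("RGB", (1200, 900))  # stand-in for the example page
target_max_size = 800                  # detection checkpoints; structure uses 1000
scale = target_max_size / max(image.size)
resized = image.resize((int(round(scale * image.width)), int(round(scale * image.height))))
assert resized.size == (800, 600)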
'''simple docstring'''
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL

is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
4
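A converted decoder can be smoke-tested with onnxruntime. A minimal sketch, assuming the default layout <output_path>/vae_decoder/model.onnx, a 4-channel latent as in Stable Diffusion VAEs, and that the traced boolean return_dict argument was folded into the graph as a constant:

import numpy as np
import onnxruntime as ort

# Smoke test for the exported VAE decoder; the path and latent shape are assumptions.
sess = ort.InferenceSession("output/vae_decoder/model.onnx")
latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
(sample,) = sess.run(None, {"latent_sample": latent})
print(sample.shape)  # e.g. (1, 3, 200, 200) for an 8x upsampling decoder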
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case__ = logging.getLogger(__name__) @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : int = os.path.join( _lowerCamelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , ) A_ : Dict = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : List[str] = label_list[2], label_list[1] A_ : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
A_ : str = cached_features_file + '''.lock''' with FileLock(_lowerCamelCase ): if os.path.exists(_lowerCamelCase ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) A_ : List[str] = torch.load(_lowerCamelCase ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) A_ : Optional[int] = ( processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) ) logger.info('''Training examples: %s''' , len(_lowerCamelCase ) ) A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) logger.info('''Saving features into cached file %s''' , _lowerCamelCase ) torch.save(self.features , _lowerCamelCase ) def __len__( self : List[str] ): """simple docstring""" return len(self.features ) def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" return self.features[i] def _a ( self : str ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : Union[str, Any] = label_list[2], label_list[1] A_ : Tuple = label_list A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) A_ : List[Any] = tf.data.Dataset.from_generator( _lowerCamelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _a ( self : Any ): """simple docstring""" return self.dataset def __len__( self : Dict ): """simple docstring""" return len(self.features ) def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ): """simple docstring""" return self.features[i] def _a ( self : Tuple ): """simple docstring""" return self.label_list class UpperCamelCase_ (a__ ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def _a ( self : List[str] , _lowerCamelCase : Tuple ): """simple docstring""" return 
self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def _a ( self : Any ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ): """simple docstring""" A_ : Tuple = [] for i, line in enumerate(_lowerCamelCase ): if i == 0: continue A_ : str = '''%s-%s''' % (set_type, line[0]) A_ : Optional[Any] = line[5] A_ : Union[str, Any] = line[6] A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7] A_ : str = line[0] examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) ) return examples def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int: A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )} A_ : Optional[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ): if ex_index % 1_0_0_0_0 == 0: logger.info('''Writing example %d''' % (ex_index) ) A_ : Optional[int] = tokenizer( example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , ) A_ : List[str] = label_map[example.label] if example.label in label_map else 0 A_ : Tuple = int(example.pairID ) features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f'guid: {example}' ) logger.info(f'features: {features[i]}' ) return features snake_case__ = { """hans""": 3, } snake_case__ = { """hans""": HansProcessor, }
4
1
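The HANS feature conversion above reduces to a label map plus one padded, truncated tokenizer call per premise/hypothesis pair. A minimal sketch; bert-base-uncased is only a stand-in checkpoint:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # stand-in model
label_map = {label: i for i, label in enumerate(["contradiction", "entailment", "neutral"])}

inputs = tokenizer(
    "The doctor saw the lawyer.",   # text_a (premise)
    "The lawyer saw the doctor.",   # text_b (hypothesis)
    max_length=128,
    padding="max_length",
    truncation=True,
)
# Labels outside the map fall back to 0, exactly as in the conversion loop above.
label = label_map["entailment"] if "entailment" in label_map else 0
assert len(inputs["input_ids"]) == 128 and label == 1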
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin snake_case__ = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = AlbertTokenizer _lowerCAmelCase = AlbertTokenizerFast _lowerCAmelCase = True _lowerCAmelCase = True _lowerCAmelCase = True def _a ( self : int ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing A_ : str = AlbertTokenizer(_lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _a ( self : int , _lowerCamelCase : Any ): """simple docstring""" A_ : Any = '''this is a test''' A_ : Dict = '''this is a test''' return input_text, output_text def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = '''<pad>''' A_ : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''▁eloquent''' ) self.assertEqual(len(_lowerCamelCase ) , 30000 ) def _a ( self : Any ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _a ( self : str ): """simple docstring""" if not self.test_rust_tokenizer: return A_ : Any = self.get_tokenizer() A_ : List[str] = self.get_rust_tokenizer() A_ : List[str] = '''I was born in 92000, and this is falsé.''' A_ : List[Any] = tokenizer.tokenize(_lowerCamelCase ) A_ : List[Any] = rust_tokenizer.tokenize(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) A_ : Any = rust_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = self.get_rust_tokenizer() A_ : List[Any] = tokenizer.encode(_lowerCamelCase ) A_ : List[str] = rust_tokenizer.encode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Union[str, Any] = AlbertTokenizer(_lowerCamelCase , keep_accents=_lowerCamelCase ) A_ : Optional[Any] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_lowerCamelCase , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [48, 25, 21, 1289] ) A_ : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _lowerCamelCase , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] ) A_ : Tuple = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) A_ : Optional[int] = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertListEqual( _lowerCamelCase , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', 
'''<unk>''', '''.'''] , ) def _a ( self : Any ): """simple docstring""" A_ : Dict = AlbertTokenizer(_lowerCamelCase ) A_ : List[Any] = tokenizer.encode('''sequence builders''' ) A_ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' ) A_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) A_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _a ( self : List[Any] ): """simple docstring""" A_ : Tuple = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_lowerCamelCase , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , )
4
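The tokenizer tests above lean on SentencePiece's convention of prefixing word-initial pieces with "▁" and on ALBERT's default lowercasing. The same behaviour can be checked against the released checkpoint (this downloads albert-base-v2 on first use, and the expected outputs assume its stock vocabulary):

from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
tokens = tokenizer.tokenize("This is a test")
print(tokens)                 # expected: ['▁this', '▁is', '▁a', '▁test']
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokenizer.decode(ids))  # expected: "this is a test"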
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = "utf-8" _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = True # deprecated _lowerCAmelCase = None # deprecated _lowerCAmelCase = 1_0 << 2_0 # 10MB _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = JsonConfig def _a ( self : int ): """simple docstring""" if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) A_ : List[Any] = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def _a ( self : Any , _lowerCamelCase : List[str] ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : List[str] = [files] A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : Tuple = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : int = [files] A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : int , _lowerCamelCase : pa.Table ): """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : int ): """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : int = json.load(_lowerCamelCase ) # We keep only the field we are interested in A_ : List[str] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_lowerCamelCase , 
(list, tuple) ): A_ : int = set().union(*[row.keys() for row in dataset] ) A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} else: A_ : Tuple = dataset A_ : Dict = pa.Table.from_pydict(_lowerCamelCase ) yield file_idx, self._cast_table(_lowerCamelCase ) # If the file has one json object per line else: with open(_lowerCamelCase , '''rb''' ) as f: A_ : int = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ : int = max(self.config.chunksize // 32 , 16 << 10 ) A_ : int = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A_ : Any = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_lowerCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' ) try: while True: try: A_ : List[Any] = paj.read_json( io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_lowerCamelCase , pa.ArrowInvalid ) and "straddling" not in str(_lowerCamelCase ) or block_size > len(_lowerCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : Optional[Any] = json.load(_lowerCamelCase ) except json.JSONDecodeError: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON try: A_ : Optional[int] = set().union(*[row.keys() for row in dataset] ) A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} A_ : int = pa.Table.from_pydict(_lowerCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(_lowerCamelCase ) break else: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError( f'Not able to read records in the JSON file at {file}. ' f'You should probably indicate the field of the JSON file containing your records. ' f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) batch_idx += 1
4
1
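The JSON builder above parses newline-delimited chunks with pyarrow and doubles block_size whenever a record straddles a block boundary. The happy path, self-contained:

import io

import pyarrow.json as paj

# Parse a JSON-lines chunk the way the builder above does.
batch = b'{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n'
block_size = max((10 << 20) // 32, 16 << 10)  # chunksize // 32, floored at 16kB, as above
table = paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
assert table.to_pydict() == {"a": [1, 2], "b": ["x", "y"]}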
'''simple docstring''' import argparse import struct import unittest class UpperCamelCase_ : """simple docstring""" def __init__( self : Any , _lowerCamelCase : bytes ): """simple docstring""" A_ : Dict = data # Initialize hash values A_ : Optional[Any] = [ 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19, ] # Initialize round constants A_ : Tuple = [ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, ] A_ : Union[str, Any] = self.preprocessing(self.data ) self.final_hash() @staticmethod def _a ( _lowerCamelCase : bytes ): """simple docstring""" A_ : Union[str, Any] = B'''\x80''' + (B'''\x00''' * (63 - (len(_lowerCamelCase ) + 8) % 64)) A_ : Optional[int] = struct.pack('''>Q''' , (len(_lowerCamelCase ) * 8) ) return data + padding + big_endian_integer def _a ( self : List[Any] ): """simple docstring""" A_ : Optional[int] = [ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers A_ : List[str] = list(struct.unpack('''>16L''' , _lowerCamelCase ) ) # add 48 0-ed integers words += [0] * 48 A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ : str = self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array A_ : int = ( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) A_ : Union[str, Any] = ( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) A_ : List[Any] = ( words[index - 16] + sa + words[index - 7] + sa ) % 0x100000000 # Compression A_ : Any = self.ror(_lowerCamelCase , 6 ) ^ self.ror(_lowerCamelCase , 11 ) ^ self.ror(_lowerCamelCase , 25 ) A_ : List[Any] = (e & f) ^ ((~e & 0xffffffff) & g) A_ : List[Any] = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x100000000 A_ : str = self.ror(_lowerCamelCase , 2 ) ^ self.ror(_lowerCamelCase , 13 ) ^ self.ror(_lowerCamelCase , 22 ) A_ : Optional[Any] = (a & b) ^ (a & c) ^ (b & c) A_ : Dict = (sa + maj) % 0x100000000 A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,A_ : str = ( g, f, e, ((d + tempa) % 0x100000000), c, b, a, ((tempa + tempa) % 0x100000000), ) A_ : Optional[Any] = [a, b, c, d, e, f, g, h] # Modify final values A_ : Union[str, Any] = [ ((element + mutated_hash_values[index]) % 0x100000000) for index, element in enumerate(self.hashes ) ] A_ : Optional[int] = ''''''.join([hex(_lowerCamelCase )[2:].zfill(8 ) for value in self.hashes] ) def _a ( self : int , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" return 0xffffffff & (value << (32 - rotations)) | (value >> rotations) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Tuple ): """simple docstring""" import hashlib A_ : str = bytes('''Test 
String''' , '''utf-8''' ) self.assertEqual(SHAaaa(_lowerCamelCase ).hash , hashlib.shaaaa(_lowerCamelCase ).hexdigest() ) def snake_case__ ( ) -> None: import doctest doctest.testmod() A_ : str = argparse.ArgumentParser() parser.add_argument( '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument( '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) A_ : Optional[int] = parser.parse_args() A_ : Optional[int] = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: A_ : Optional[Any] = f.read() else: A_ : Optional[Any] = bytes(lowerCamelCase__ , '''utf-8''' ) print(SHAaaa(lowerCamelCase__ ).hash ) if __name__ == "__main__": main()
4
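The compression loop in the SHA-256 sample above is built on a 32-bit right-rotate, and its unit test compares the result to hashlib. Both pieces in isolation:

import hashlib

# 32-bit right-rotate, the primitive used throughout the compression loop above.
def ror(value: int, rotations: int) -> int:
    return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)

assert ror(0x00000001, 1) == 0x80000000
assert ror(0x12345678, 8) == 0x78123456

# Reference digest from the standard library for the test's input.
print(hashlib.sha256(bytes("Test String", "utf-8")).hexdigest())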
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCamelCase_ (a__, a__ ): """simple docstring""" _lowerCAmelCase = 'swin' _lowerCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Optional[int] = image_size A_ : Optional[int] = patch_size A_ : Optional[int] = num_channels A_ : Any = embed_dim A_ : List[Any] = depths A_ : Any = len(_lowerCamelCase ) A_ : List[Any] = num_heads A_ : Tuple = window_size A_ : Tuple = mlp_ratio A_ : Dict = qkv_bias A_ : List[str] = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Any = drop_path_rate A_ : List[Any] = hidden_act A_ : Tuple = use_absolute_embeddings A_ : int = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) ) A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : str ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return 1E-4
4
1
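The Swin config above derives hidden_size as embed_dim * 2**(num_stages - 1), since Swin doubles the channel dimension at each stage transition. With the defaults:

# Defaults from the config above: embed_dim=96, depths=[2, 2, 6, 2] (4 stages).
embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768  # channel dimension after the last stage
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
assert stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]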
'''simple docstring''' import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = None @property def _a ( self : int ): """simple docstring""" return self.feat_extract_tester.prepare_feat_extract_dict() def _a ( self : Any ): """simple docstring""" A_ : str = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_lowerCamelCase , '''feature_size''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''sampling_rate''' ) ) self.assertTrue(hasattr(_lowerCamelCase , '''padding_value''' ) ) def _a ( self : Dict ): """simple docstring""" A_ : str = self.feat_extract_tester.prepare_inputs_for_common() A_ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) A_ : Optional[int] = feat_extract.model_input_names[0] A_ : List[str] = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_lowerCamelCase ) == len(_lowerCamelCase ) for x, y in zip(_lowerCamelCase , processed_features[input_name] ) ) ) A_ : Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase ) A_ : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' ) A_ : int = processed_features[input_name] if len(batch_features_input.shape ) < 3: A_ : str = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def _a ( self : Any ): """simple docstring""" A_ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase ) A_ : Any = self.feature_extraction_class(**self.feat_extract_dict ) A_ : int = feat_extract.model_input_names[0] A_ : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' ) A_ : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: A_ : str = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def _a ( self : List[Any] ): """simple docstring""" A_ : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase ) A_ : int = self.feature_extraction_class(**self.feat_extract_dict ) A_ : List[Any] = feat_extract.model_input_names[0] A_ : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' ) A_ : str = processed_features[input_name] if len(batch_features_input.shape ) < 3: A_ : Union[str, Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def _a ( self : Union[str, Any] , _lowerCamelCase : Dict=False ): """simple docstring""" def _inputs_have_equal_length(_lowerCamelCase : str ): A_ : Tuple = len(input[0] ) for input_slice in input[1:]: if len(_lowerCamelCase ) != length: return False return True def _inputs_are_equal(_lowerCamelCase : str , _lowerCamelCase : Tuple ): if len(_lowerCamelCase ) != len(_lowerCamelCase ): return False for input_slice_a, input_slice_a in zip(_lowerCamelCase , _lowerCamelCase ): if not np.allclose(np.asarray(_lowerCamelCase ) , np.asarray(_lowerCamelCase ) , atol=1E-3 ): return False return True A_ : Tuple = 
self.feature_extraction_class(**self.feat_extract_dict ) A_ : Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCamelCase ) A_ : Optional[Any] = feat_extract.model_input_names[0] A_ : Dict = BatchFeature({input_name: speech_inputs} ) A_ : Optional[int] = self.feat_extract_tester.seq_length_diff A_ : Optional[int] = self.feat_extract_tester.max_seq_length + pad_diff A_ : List[str] = self.feat_extract_tester.min_seq_length A_ : Tuple = self.feat_extract_tester.batch_size A_ : Any = self.feat_extract_tester.feature_size # test padding for List[int] + numpy A_ : List[str] = feat_extract.pad(_lowerCamelCase , padding=_lowerCamelCase ) A_ : str = input_a[input_name] A_ : Union[str, Any] = feat_extract.pad(_lowerCamelCase , padding='''longest''' ) A_ : Optional[Any] = input_a[input_name] A_ : Optional[int] = feat_extract.pad(_lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[-1] ) ) A_ : Union[str, Any] = input_a[input_name] A_ : Optional[int] = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''np''' ) A_ : Union[str, Any] = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='''max_length''' )[input_name] A_ : List[str] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=_lowerCamelCase , return_tensors='''np''' ) A_ : List[Any] = input_a[input_name] self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy A_ : List[str] = feat_extract.pad(_lowerCamelCase , pad_to_multiple_of=10 ) A_ : int = input_a[input_name] A_ : Optional[Any] = feat_extract.pad(_lowerCamelCase , padding='''longest''' , pad_to_multiple_of=10 ) A_ : Optional[Any] = input_a[input_name] A_ : Any = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_lowerCamelCase ) A_ : Tuple = input_a[input_name] A_ : Any = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_lowerCamelCase , return_tensors='''np''' , ) A_ : List[str] = input_a[input_name] self.assertTrue(all(len(_lowerCamelCase ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) ) A_ : Union[str, Any] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(_lowerCamelCase ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct A_ : Tuple = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( 
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def _a ( self : Any , _lowerCamelCase : str=False ): """simple docstring""" def _inputs_have_equal_length(_lowerCamelCase : str ): A_ : Dict = len(input[0] ) for input_slice in input[1:]: if len(_lowerCamelCase ) != length: return False return True def _inputs_are_equal(_lowerCamelCase : Tuple , _lowerCamelCase : int ): if len(_lowerCamelCase ) != len(_lowerCamelCase ): return False for input_slice_a, input_slice_a in zip(_lowerCamelCase , _lowerCamelCase ): if not np.allclose(np.asarray(_lowerCamelCase ) , np.asarray(_lowerCamelCase ) , atol=1E-3 ): return False return True A_ : int = self.feature_extraction_class(**self.feat_extract_dict ) A_ : int = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCamelCase ) A_ : List[str] = feat_extract.model_input_names[0] A_ : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) # truncate to smallest A_ : List[str] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=_lowerCamelCase ) A_ : Any = input_a[input_name] A_ : Optional[Any] = feat_extract.pad(_lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) ) A_ : List[str] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) # truncate to smallest with np A_ : Optional[Any] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=_lowerCamelCase , ) A_ : str = input_a[input_name] A_ : str = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' ) A_ : Optional[Any] = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) # truncate to middle A_ : Optional[Any] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_lowerCamelCase , return_tensors='''np''' , ) A_ : str = input_a[input_name] A_ : Optional[int] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_lowerCamelCase ) A_ : Union[str, Any] = input_a[input_name] A_ : Optional[int] = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' ) A_ : Union[str, Any] = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) ) # since truncation forces padding to be smaller than longest 
input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , truncation=_lowerCamelCase )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='''longest''' , truncation=_lowerCamelCase )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='''longest''' , truncation=_lowerCamelCase )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='''max_length''' , truncation=_lowerCamelCase )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy A_ : str = 12 A_ : str = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCamelCase , truncation=_lowerCamelCase , ) A_ : Tuple = input_a[input_name] A_ : str = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCamelCase , ) A_ : Any = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of A_ : Tuple = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: A_ : Optional[int] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) def _a ( self : Optional[int] ): """simple docstring""" self._check_padding(numpify=_lowerCamelCase ) def _a ( self : str ): """simple docstring""" self._check_padding(numpify=_lowerCamelCase ) def _a ( self : str ): """simple docstring""" self._check_truncation(numpify=_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" self._check_truncation(numpify=_lowerCamelCase ) @require_torch def _a ( self : List[Any] ): """simple docstring""" A_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) A_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common() A_ : Union[str, Any] = feat_extract.model_input_names[0] A_ : Optional[int] = BatchFeature({input_name: speech_inputs} ) A_ : str = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''np''' )[input_name] A_ : Any = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''pt''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def _a ( self : Optional[int] ): """simple docstring""" A_ : int = self.feature_extraction_class(**self.feat_extract_dict ) A_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common() A_ : Union[str, Any] = feat_extract.model_input_names[0] A_ : Optional[Any] = BatchFeature({input_name: speech_inputs} ) A_ : Tuple = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''np''' )[input_name] A_ : Optional[int] = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''tf''' )[input_name] 
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def _a ( self : int ): """simple docstring""" A_ : Optional[Any] = self.feat_extract_dict A_ : Tuple = True A_ : Optional[Any] = self.feature_extraction_class(**_lowerCamelCase ) A_ : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common() A_ : List[str] = [len(_lowerCamelCase ) for x in speech_inputs] A_ : int = feat_extract.model_input_names[0] A_ : Tuple = BatchFeature({input_name: speech_inputs} ) A_ : Any = feat_extract.pad(_lowerCamelCase , padding='''longest''' , return_tensors='''np''' ) self.assertIn('''attention_mask''' , _lowerCamelCase ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = self.feat_extract_dict A_ : Dict = True A_ : List[str] = self.feature_extraction_class(**_lowerCamelCase ) A_ : str = self.feat_extract_tester.prepare_inputs_for_common() A_ : Dict = [len(_lowerCamelCase ) for x in speech_inputs] A_ : Union[str, Any] = feat_extract.model_input_names[0] A_ : str = BatchFeature({input_name: speech_inputs} ) A_ : Optional[Any] = min(_lowerCamelCase ) A_ : int = feat_extract.pad( _lowerCamelCase , padding='''max_length''' , max_length=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='''np''' ) self.assertIn('''attention_mask''' , _lowerCamelCase ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
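The padding behaviour exercised by these tests reduces to one invariant: the returned attention mask is 1 over real frames and 0 over padding, so its per-row sums equal the original sequence lengths. A minimal sketch of that invariant with plain numpy; the feature values and lengths below are made up for illustration and are not taken from the test file:

import numpy as np

# Two hypothetical raw feature sequences of unequal length.
speech_inputs = [np.ones(5), np.ones(3)]
max_len = max(len(x) for x in speech_inputs)

# Pad values with zeros and build the matching attention mask.
padded = np.stack([np.pad(x, (0, max_len - len(x))) for x in speech_inputs])
attention_mask = np.stack(
    [np.pad(np.ones(len(x), dtype=np.int32), (0, max_len - len(x))) for x in speech_inputs]
)

# Row sums of the mask recover the original lengths, as the tests assert.
assert attention_mask.sum(-1).tolist() == [len(x) for x in speech_inputs]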
4
'''simple docstring'''
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    # Walk two indices inward from both ends of a sorted list until a
    # pair sums to the target; return the pair of indices, or [] if none.
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
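Note that the scan is only correct when nums is sorted in ascending order, since each comparison decides which end to move. Two quick checks of the function above (example values chosen for illustration):

assert two_pointer([2, 7, 11, 15], 9) == [0, 1]  # 2 + 7 == 9
assert two_pointer([1, 3, 5], 100) == []         # no pair sums to 100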
4
1
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    # type hints for linting
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
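A minimal usage sketch for the pipeline above, assuming it is the KarrasVePipeline exposed by diffusers; the checkpoint id is an assumption chosen because it ships a UNet2DModel compatible with a KarrasVeScheduler, not something stated in this file:

import torch
from diffusers import KarrasVePipeline

# Assumed checkpoint; any unconditional UNet2DModel checkpoint should work here.
pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
image = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0)).images[0]
image.save("sample.png")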
4
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid for a vertex when no adjacent vertex already carries it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
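A short worked example of the backtracking colorer above: a triangle (three mutually adjacent vertices, given as an adjacency matrix) needs three colors, so asking for only two returns the empty list.

triangle = [
    [0, 1, 1],
    [1, 0, 1],
    [1, 1, 0],
]
assert color(triangle, 3) == [0, 1, 2]  # one valid 3-coloring
assert color(triangle, 2) == []         # a triangle is not 2-colorable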
4
1
'''simple docstring'''
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
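The conversion always routes through the joule base unit, value * factor[from] / factor[to]. Two quick checks against the table above:

assert energy_conversion("kilowatthour", "joule", 1.0) == 3600000.0  # 1 kWh = 3.6e6 J
assert energy_conversion("joule", "kilojoule", 5000) == 5.0          # 5000 J = 5 kJ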
4
'''simple docstring'''
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
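The blinker is the classic sanity check for the rule table above: a vertical bar of three live cells becomes a horizontal bar, and a second step restores it, so it oscillates with period 2.

step1 = new_generation(BLINKER)
assert step1 == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert new_generation(step1) == BLINKER  # period-2 oscillator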
4
1
'''simple docstring'''
import argparse
import logging
from collections import namedtuple

import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer  # The authors' implementation

from transformers import BertTokenizer


logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
    [
        "temp_dir",
        "large",
        "use_bert_emb",
        "finetune_bert",
        "encoder",
        "share_emb",
        "max_pos",
        "enc_layers",
        "enc_hidden_size",
        "enc_heads",
        "enc_ff_size",
        "enc_dropout",
        "dec_layers",
        "dec_hidden_size",
        "dec_heads",
        "dec_ff_size",
        "dec_dropout",
    ],
)


def convert_bertabs_checkpoints(path, pytorch_dump_folder_path):
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------
    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------
    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_generator, output_original_generator, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
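A hedged invocation sketch for the conversion above; the script filename and both paths are placeholders, not taken from this file. Note also that the function as written saves to a hard-coded output path regardless of its second argument.

# Equivalent command line (filename and paths assumed):
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertext_cnndm_transformer.pt \
#       --pytorch_dump_folder_path ./converted
convert_bertabs_checkpoints("/path/to/bertext_cnndm_transformer.pt", "./converted")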
4
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Any = tempfile.mkdtemp() A_ : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) A_ : Tuple = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict , **_lowerCamelCase : Tuple ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ): """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ): """simple docstring""" return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self : int ): """simple docstring""" A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self : int ): """simple docstring""" A_ : Tuple = self.get_tokenizer() A_ : Tuple = self.get_rust_tokenizer() A_ : Dict = self.get_image_processor() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) A_ : List[str] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.get_image_processor() A_ : Any = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Any = self.prepare_image_inputs() A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' ) A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self : Dict ): """simple docstring""" A_ : str = self.get_image_processor() A_ : List[str] = self.get_tokenizer() A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : int = '''lower newer''' A_ : str = processor(text=_lowerCamelCase ) A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self : str ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = '''lower newer''' A_ : Optional[int] = self.prepare_image_inputs() A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = self.get_image_processor() A_ : Optional[int] = self.get_tokenizer() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : str = processor.batch_decode(_lowerCamelCase ) A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : str = self.get_image_processor() A_ : Tuple = self.get_tokenizer() A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = '''lower newer''' A_ : List[str] = self.prepare_image_inputs() A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
4
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
4
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = """▁""" snake_case__ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", } snake_case__ = { """vocab_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json""" ), }, """spm_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model""" ) }, } snake_case__ = { """facebook/s2t-small-librispeech-asr""": 10_24, } snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""] snake_case__ = {"""mustc""": MUSTC_LANGS} class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = MAX_MODEL_INPUT_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = [] def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) A_ : Optional[int] = do_upper_case A_ : Tuple = do_lower_case A_ : Tuple = load_json(_lowerCamelCase ) A_ : Tuple = {v: k for k, v in self.encoder.items()} A_ : List[Any] = spm_file A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs ) if lang_codes is not None: A_ : Any = lang_codes A_ : Optional[Any] = LANGUAGES[lang_codes] A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs] A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs} A_ : Optional[int] = self.lang_tokens A_ : int = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: A_ : Dict = {} @property def _a ( self : Tuple ): """simple docstring""" return len(self.encoder ) @property def _a ( self : int ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def _a ( self : List[str] , _lowerCamelCase : Any ): """simple docstring""" A_ : int = new_tgt_lang self.set_tgt_lang_special_tokens(_lowerCamelCase ) def _a ( self : Tuple , _lowerCamelCase : str ): """simple docstring""" A_ : List[str] = self.lang_code_to_id[tgt_lang] A_ : Optional[Any] = [lang_code_id] def _a ( self : Optional[Any] , _lowerCamelCase : str ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def _a ( self : List[Any] , _lowerCamelCase : int ): """simple docstring""" return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] ) def 
_a ( self : int , _lowerCamelCase : int ): """simple docstring""" return self.decoder.get(_lowerCamelCase , self.unk_token ) def _a ( self : int , _lowerCamelCase : List[str] ): """simple docstring""" A_ : List[Any] = [] A_ : Any = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " A_ : Optional[Any] = [] else: current_sub_tokens.append(_lowerCamelCase ) A_ : Tuple = self.sp_model.decode(_lowerCamelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) A_ : Tuple = [1] * len(self.prefix_tokens ) A_ : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def _a ( self : Dict ): """simple docstring""" A_ : Union[str, Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.__dict__.copy() A_ : List[Any] = None return state def __setstate__( self : List[str] , _lowerCamelCase : Dict ): """simple docstring""" A_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A_ : Optional[int] = {} A_ : int = load_spm(self.spm_file , self.sp_model_kwargs ) def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" A_ : Dict = Path(_lowerCamelCase ) assert save_dir.is_dir(), f'{save_directory} should be a directory' A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(_lowerCamelCase , '''wb''' ) as fi: A_ : List[str] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (str(_lowerCamelCase ), str(_lowerCamelCase )) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ ) spm.Load(str(lowerCamelCase__ ) ) return spm def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]: with open(lowerCamelCase__ , '''r''' ) as f: return json.load(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any , 
lowerCamelCase__ : str ) -> None: with open(lowerCamelCase__ , '''w''' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
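To make the prefix/suffix logic in get_special_tokens_mask concrete: with one target-language prefix token and one appended EOS, the mask flags exactly those two positions. A hedged sketch with illustrative token ids (not taken from any real vocabulary):

prefix_ones = [1]          # one flag per prefix token (the <lang:xx> id)
suffix_ones = [1]          # the trailing EOS
token_ids = [17, 23, 42]   # arbitrary content ids for a single sequence
mask = prefix_ones + [0] * len(token_ids) + suffix_ones
assert mask == [1, 0, 0, 0, 1]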
4
1
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """Visual-Attention-Network/van-base""": ( """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'van' def __init__( self : List[str] , _lowerCamelCase : Dict=224 , _lowerCamelCase : str=3 , _lowerCamelCase : List[str]=[7, 3, 3, 3] , _lowerCamelCase : Tuple=[4, 2, 2, 2] , _lowerCamelCase : Union[str, Any]=[64, 128, 320, 512] , _lowerCamelCase : Optional[int]=[3, 3, 12, 3] , _lowerCamelCase : str=[8, 8, 4, 4] , _lowerCamelCase : Optional[int]="gelu" , _lowerCamelCase : Any=0.02 , _lowerCamelCase : str=1E-6 , _lowerCamelCase : Union[str, Any]=1E-2 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : Any=0.0 , **_lowerCamelCase : Optional[Any] , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : int = image_size A_ : int = num_channels A_ : Any = patch_sizes A_ : List[Any] = strides A_ : Tuple = hidden_sizes A_ : Any = depths A_ : Dict = mlp_ratios A_ : Optional[int] = hidden_act A_ : str = initializer_range A_ : Optional[Any] = layer_norm_eps A_ : List[str] = layer_scale_init_value A_ : Optional[int] = drop_path_rate A_ : Optional[Any] = dropout_rate
4
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 snake_case__ = sys.version_info >= (3, 10) def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' _lowerCAmelCase = 4_2 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = BasicEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[1, 2, 3] ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field() _lowerCAmelCase = field() _lowerCAmelCase = field() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = field() _lowerCAmelCase = None _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A_ : Union[str, 
Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : int = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) A_ : Dict = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : Any = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', 
'''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = HfArgumentParser(_lowerCamelCase ) A_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A_ : int = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _a ( self : Optional[int] ): """simple docstring""" @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" A_ : List[str] = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : int = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) A_ : Tuple = [OptionalExample] if 
is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : int = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) ) A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Dict = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Union[str, Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0] A_ : str = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Any = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] A_ : Optional[Any] = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = 
os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] A_ : int = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
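The behaviours tested above mirror ordinary usage of HfArgumentParser: declare a dataclass, hand it to the parser, and parse either argv-style lists, dicts, or JSON/YAML files. A minimal sketch, with field names invented for illustration:

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class RunArgs:
    foo: int
    baz: str = field(default="toto", metadata={"help": "help message"})


parser = HfArgumentParser(RunArgs)
(run_args,) = parser.parse_args_into_dataclasses(["--foo", "12"])
assert run_args.foo == 12 and run_args.baz == "toto"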
4
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin snake_case__ = False @skip_mps class UpperCamelCase_ (a__, a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = StableDiffusionAttendAndExcitePipeline _lowerCAmelCase = False _lowerCAmelCase = TEXT_TO_IMAGE_PARAMS _lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} ) _lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS _lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def _a ( cls : Any ): """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(_lowerCamelCase ) @classmethod def _a ( cls : int ): """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(_lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" torch.manual_seed(0 ) A_ : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowerCamelCase , ) A_ : Any = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , ) torch.manual_seed(0 ) A_ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) A_ : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) A_ : Any = CLIPTextModel(_lowerCamelCase ) A_ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) A_ : Optional[Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _a ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : Any=0 ): """simple docstring""" if str(_lowerCamelCase ).startswith('''mps''' ): A_ : Dict = torch.manual_seed(_lowerCamelCase ) else: A_ : List[str] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase ) A_ : Dict = { '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def _a ( self : Optional[int] ): """simple docstring""" A_ : List[Any] = '''cpu''' A_ : List[Any] = self.get_dummy_components() A_ : int = self.pipeline_class(**_lowerCamelCase ) 
pipe.to(_lowerCamelCase ) pipe.set_progress_bar_config(disable=_lowerCamelCase ) A_ : Dict = self.get_dummy_inputs(_lowerCamelCase ) A_ : Optional[Any] = pipe(**_lowerCamelCase ).images A_ : Optional[Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) A_ : Optional[Any] = np.array( [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] ) A_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_lowerCamelCase , 1E-3 ) def _a ( self : Dict ): """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def _a ( self : List[str] ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def _a ( self : List[str] ): """simple docstring""" self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def _a ( self : Tuple ): """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def _a ( self : Any ): """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def _a ( self : int ): """simple docstring""" super().test_save_load_local(expected_max_difference=5E-4 ) def _a ( self : Union[str, Any] ): """simple docstring""" super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : int ): """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = torch.manual_seed(51 ) A_ : Optional[int] = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) A_ : List[Any] = '''a painting of an elephant with glasses''' A_ : List[str] = [5, 7] A_ : Tuple = pipe( prompt=_lowerCamelCase , token_indices=_lowerCamelCase , guidance_scale=7.5 , generator=_lowerCamelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] A_ : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-1
4
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) 
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
1
'''simple docstring'''


def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing that maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the unsorted prefix, sending the maximum to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
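A quick usage sketch for pancake_sort above (an editor-added example, not part of the original sample): each pass brings the current maximum to the front with one flip, then flips the whole unsorted window to drop it into its final slot.

# Hedged example: assumes pancake_sort as defined above.
print(pancake_sort([3, 1, 2]))
# pass 1: max 3 is already at the front; flipping the 3-element window gives [2, 1, 3]
# pass 2: max 2 is at the front; flipping the 2-element window gives [1, 2, 3]
assert pancake_sort([7, 2, 9, 0]) == [0, 2, 7, 9]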
4
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool

if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation
    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
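As a standalone illustration of the decode step above (an editor-added sketch with made-up tensor shapes, not part of the original sample), thresholding CLIPSeg-style logits at zero yields a binary mask image:

# Hedged sketch: logits below/above zero become background/foreground pixels.
import numpy as np
from PIL import Image

logits = np.random.randn(352, 352)        # stand-in for the model output
mask = (logits > 0).astype(np.uint8)      # same effect as the two in-place assignments
Image.fromarray(mask * 255).save("mask.png")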
4
1
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = "utf-8" _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = True # deprecated _lowerCAmelCase = None # deprecated _lowerCAmelCase = 1_0 << 2_0 # 10MB _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = JsonConfig def _a ( self : int ): """simple docstring""" if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) A_ : List[Any] = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def _a ( self : Any , _lowerCamelCase : List[str] ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : List[str] = [files] A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : Tuple = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : int = [files] A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : int , _lowerCamelCase : pa.Table ): """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : int ): """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : int = json.load(_lowerCamelCase ) # We keep only the field we are interested in A_ : List[str] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_lowerCamelCase , 
(list, tuple) ): A_ : int = set().union(*[row.keys() for row in dataset] ) A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} else: A_ : Tuple = dataset A_ : Dict = pa.Table.from_pydict(_lowerCamelCase ) yield file_idx, self._cast_table(_lowerCamelCase ) # If the file has one json object per line else: with open(_lowerCamelCase , '''rb''' ) as f: A_ : int = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ : int = max(self.config.chunksize // 32 , 16 << 10 ) A_ : int = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A_ : Any = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_lowerCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' ) try: while True: try: A_ : List[Any] = paj.read_json( io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_lowerCamelCase , pa.ArrowInvalid ) and "straddling" not in str(_lowerCamelCase ) or block_size > len(_lowerCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : Optional[Any] = json.load(_lowerCamelCase ) except json.JSONDecodeError: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON try: A_ : Optional[int] = set().union(*[row.keys() for row in dataset] ) A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} A_ : int = pa.Table.from_pydict(_lowerCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(_lowerCamelCase ) break else: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError( f'Not able to read records in the JSON file at {file}. ' f'You should probably indicate the field of the JSON file containing your records. ' f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) batch_idx += 1
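The retry loop above is easiest to see in isolation: pyarrow's JSON reader fails when a record straddles a block boundary, and the builder simply doubles the block size and tries again. A minimal editor-added sketch, with a deliberately tiny block size to provoke the failure:

import io

import pyarrow as pa
import pyarrow.json as paj

data = b'{"a": 1}\n{"a": 2, "text": "a record longer than the block"}\n'
block_size = 16  # far too small on purpose
while True:
    try:
        table = paj.read_json(io.BytesIO(data), read_options=paj.ReadOptions(block_size=block_size))
        break
    except pa.ArrowInvalid:
        block_size *= 2  # same recovery strategy as the loader above
print(table.to_pydict())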
4
'''simple docstring'''
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
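A few hedged checks for max_subarray_sum above (editor-added; expected values worked out by hand):

print(max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))           # 6, from [4, -1, 2, 1]
print(max_subarray_sum([-5, -2, -8]))                              # -2, the best single element
print(max_subarray_sum([-5, -2, -8], allow_empty_subarrays=True))  # 0, the empty subarray wins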
4
1
'''simple docstring'''
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start: str, goal: str) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start: str, target: str) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
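A hedged sanity check (editor-added) tying the two helpers above together on demo_graph:

path = bfs_shortest_path(demo_graph, "G", "D")               # ['G', 'C', 'A', 'B', 'D']
distance = bfs_shortest_path_distance(demo_graph, "G", "D")
assert distance == len(path) - 1                             # 4 edges on a 5-node path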
4
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
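A hedged instantiation sketch for the config above (editor-added; assumes the class is exposed under its upstream name, Speech2Text2Config):

from transformers import Speech2Text2Config

config = Speech2Text2Config(d_model=128, decoder_layers=2, decoder_attention_heads=2)
print(config.model_type)   # "speech_to_text_2"
print(config.hidden_size)  # 128 -- resolved to d_model via the attribute_map alias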
4
1
'''simple docstring''' import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger snake_case__ = get_logger(__name__) snake_case__ = R""" Args: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search kwargs (`Dict[str, Any]`, *optional*): Additional logits processor specific kwargs. Return: `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. """ class UpperCamelCase_ : """simple docstring""" @add_start_docstrings(_lowerCamelCase ) def __call__( self : Optional[int] , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray ): """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class UpperCamelCase_ : """simple docstring""" @add_start_docstrings(_lowerCamelCase ) def __call__( self : Optional[Any] , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray ): """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class UpperCamelCase_ (a__ ): """simple docstring""" @add_start_docstrings(_lowerCamelCase ) def __call__( self : str , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : int , **_lowerCamelCase : Optional[int] ): """simple docstring""" for processor in self: A_ : int = inspect.signature(processor.__call__ ).parameters if len(_lowerCamelCase ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'Make sure that all the required parameters: {list(function_args.keys() )} for ' f'{processor.__class__} are passed to the logits processor.' 
) A_ : List[str] = processor(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) else: A_ : Dict = processor(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) return scores class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : Any , _lowerCamelCase : float ): """simple docstring""" if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not (temperature > 0): raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}' ) A_ : int = temperature def __call__( self : List[Any] , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : int ): """simple docstring""" A_ : Optional[Any] = scores / self.temperature return scores class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : Optional[int] , _lowerCamelCase : float , _lowerCamelCase : float = -float('''Inf''' ) , _lowerCamelCase : int = 1 ): """simple docstring""" if not isinstance(_lowerCamelCase , _lowerCamelCase ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}' ) if not isinstance(_lowerCamelCase , _lowerCamelCase ) or (min_tokens_to_keep < 1): raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' ) A_ : int = top_p A_ : Union[str, Any] = filter_value A_ : Union[str, Any] = min_tokens_to_keep def __call__( self : str , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : int ): """simple docstring""" A_ ,A_ : int = lax.top_k(_lowerCamelCase , scores.shape[-1] ) A_ : str = jnp.full_like(_lowerCamelCase , self.filter_value ) A_ : Dict = jax.nn.softmax(_lowerCamelCase , axis=-1 ).cumsum(axis=-1 ) A_ : Any = cumulative_probs < self.top_p # include the token that is higher than top_p as well A_ : int = jnp.roll(_lowerCamelCase , 1 ) score_mask |= score_mask.at[:, 0].set(_lowerCamelCase ) # min tokens to keep A_ : List[Any] = score_mask.at[:, : self.min_tokens_to_keep].set(_lowerCamelCase ) A_ : Dict = jnp.where(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A_ : Dict = jax.lax.sort_key_val(_lowerCamelCase , _lowerCamelCase )[-1] return next_scores class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : Tuple , _lowerCamelCase : int , _lowerCamelCase : float = -float('''Inf''' ) , _lowerCamelCase : int = 1 ): """simple docstring""" if not isinstance(_lowerCamelCase , _lowerCamelCase ) or top_k <= 0: raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' ) A_ : str = max(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = filter_value def __call__( self : Dict , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : int ): """simple docstring""" A_ ,A_ : Dict = scores.shape A_ : Optional[Any] = jnp.full(batch_size * vocab_size , self.filter_value ) A_ : Optional[Any] = min(self.top_k , scores.shape[-1] ) # Safety check A_ ,A_ : List[Any] = lax.top_k(_lowerCamelCase , _lowerCamelCase ) A_ : Any = jnp.broadcast_to((jnp.arange(_lowerCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() A_ : int = topk_scores.flatten() A_ : Optional[int] = topk_indices.flatten() + shift A_ : Any = next_scores_flat.at[topk_indices_flat].set(_lowerCamelCase ) A_ : Optional[Any] = next_scores_flat.reshape(_lowerCamelCase , _lowerCamelCase ) return next_scores class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : str , _lowerCamelCase : int ): """simple docstring""" A_ : Any = bos_token_id def __call__( 
self : Union[str, Any] , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : int ): """simple docstring""" A_ : Optional[Any] = jnp.full(scores.shape , -float('''inf''' ) ) A_ : List[Any] = 1 - jnp.bool_(cur_len - 1 ) A_ : List[str] = jnp.where(_lowerCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , _lowerCamelCase ) return scores class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" A_ : int = max_length A_ : Any = eos_token_id def __call__( self : Dict , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : int ): """simple docstring""" A_ : Tuple = jnp.full(scores.shape , -float('''inf''' ) ) A_ : Optional[Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 ) A_ : Optional[int] = jnp.where(_lowerCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , _lowerCamelCase ) return scores class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : Tuple , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" if not isinstance(_lowerCamelCase , _lowerCamelCase ) or min_length < 0: raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}' ) if not isinstance(_lowerCamelCase , _lowerCamelCase ) or eos_token_id < 0: raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}' ) A_ : List[str] = min_length A_ : Optional[int] = eos_token_id def __call__( self : Any , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : int ): """simple docstring""" A_ : int = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) A_ : Any = jnp.where(_lowerCamelCase , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , _lowerCamelCase ) return scores class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ): """simple docstring""" A_ : int = list(_lowerCamelCase ) A_ : Tuple = begin_index def __call__( self : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : int ): """simple docstring""" A_ : Any = 1 - jnp.bool_(cur_len - self.begin_index ) A_ : Dict = jnp.where(_lowerCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , _lowerCamelCase ) return scores class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : Tuple , _lowerCamelCase : list ): """simple docstring""" A_ : int = list(_lowerCamelCase ) def __call__( self : Optional[Any] , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : int ): """simple docstring""" A_ : Any = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) ) return scores class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : List[str] , _lowerCamelCase : Tuple ): """simple docstring""" A_ : List[Any] = dict(_lowerCamelCase ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
A_ : int = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: A_ : List[str] = force_token_array.at[index].set(_lowerCamelCase ) A_ : List[Any] = jnp.intaa(_lowerCamelCase ) def __call__( self : List[str] , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : int ): """simple docstring""" def _force_token(_lowerCamelCase : Union[str, Any] ): A_ : List[str] = scores.shape[0] A_ : List[Any] = self.force_token_array[generation_idx] A_ : Optional[Any] = jnp.ones_like(_lowerCamelCase , dtype=scores.dtype ) * -float('''inf''' ) A_ : Union[str, Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) A_ : Union[str, Any] = lax.dynamic_update_slice(_lowerCamelCase , _lowerCamelCase , (0, current_token) ) return new_scores A_ : Union[str, Any] = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(_lowerCamelCase ) , lambda: scores , ) , ) return scores class UpperCamelCase_ (a__ ): """simple docstring""" def __init__( self : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Tuple = generate_config.eos_token_id A_ : Any = generate_config.no_timestamps_token_id A_ : Union[str, Any] = generate_config.no_timestamps_token_id + 1 A_ : List[str] = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(_lowerCamelCase , '''max_initial_timestamp_index''' ): A_ : Dict = generate_config.max_initial_timestamp_index else: A_ : int = model_config.vocab_size if self.max_initial_timestamp_index is None: A_ : Tuple = model_config.vocab_size def __call__( self : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : str = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) ) def handle_pairs(_lowerCamelCase : str , _lowerCamelCase : Any ): A_ : Optional[int] = jnp.where((cur_len - self.begin_index) >= 1 , _lowerCamelCase , _lowerCamelCase ) A_ : int = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _lowerCamelCase , ) A_ : Union[str, Any] = jnp.where((cur_len - self.begin_index) < 2 , _lowerCamelCase , _lowerCamelCase ) A_ : Any = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , _lowerCamelCase , _lowerCamelCase , ) return jnp.where( _lowerCamelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , _lowerCamelCase , ) A_ : Dict = jax.vmap(_lowerCamelCase )(_lowerCamelCase , _lowerCamelCase ) A_ : Dict = jnp.where(cur_len == self.begin_index , _lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _lowerCamelCase , ) A_ : str = self.timestamp_begin + self.max_initial_timestamp_index A_ : Dict = jnp.where( _lowerCamelCase , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , _lowerCamelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp A_ : Tuple = jax.nn.log_softmax(_lowerCamelCase , axis=-1 ) def handle_cumulative_probs(_lowerCamelCase : List[str] , _lowerCamelCase : Any ): A_ : Optional[Any] = 
jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) A_ : Union[str, Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , _lowerCamelCase , ) A_ : Any = jax.vmap(_lowerCamelCase )(_lowerCamelCase , _lowerCamelCase ) return scores
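To make the temperature warper above concrete, a hedged standalone sketch (editor-added) of what dividing the logits by a temperature does to the softmax:

import jax
import jax.numpy as jnp

scores = jnp.array([[2.0, 1.0, 0.1]])
for temperature in (0.5, 1.0, 2.0):
    # T < 1 sharpens the distribution, T > 1 flattens it
    print(temperature, jax.nn.softmax(scores / temperature, axis=-1))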
4
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'table-transformer' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : str = backbone_config.get('''model_type''' ) A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] A_ : List[str] = config_class.from_dict(_lowerCamelCase ) # set timm attributes to None A_ ,A_ ,A_ : Union[str, Any] = None, None, None A_ : Optional[Any] = use_timm_backbone A_ : Optional[int] = backbone_config A_ : Optional[Any] = num_channels A_ : Dict = num_queries A_ : str = d_model A_ : List[str] = encoder_ffn_dim A_ : int = encoder_layers A_ : Optional[Any] = encoder_attention_heads A_ : List[str] = decoder_ffn_dim A_ : Any = decoder_layers A_ : List[str] = decoder_attention_heads A_ : Tuple = dropout A_ : Optional[Any] = attention_dropout A_ : Any = activation_dropout A_ : List[Any] = activation_function A_ : Dict = init_std A_ : Any = init_xavier_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : Any = encoder_layers A_ : List[str] = auxiliary_loss A_ : List[Any] = position_embedding_type A_ : Optional[Any] = backbone A_ : Tuple = use_pretrained_backbone A_ : List[Any] = dilation # Hungarian matcher A_ : List[str] = class_cost A_ : str = bbox_cost A_ : Union[str, Any] = giou_cost # Loss coefficients A_ : Any = mask_loss_coefficient A_ : Optional[int] = dice_loss_coefficient A_ : Dict = bbox_loss_coefficient A_ : int = giou_loss_coefficient A_ : int = eos_coefficient super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def _a ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def _a ( self : Any ): """simple docstring""" return self.d_model class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Tuple ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-5 @property def _a ( self : str ): """simple docstring""" return 12
4
1
'''simple docstring''' import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case__ = 16 snake_case__ = 32 def snake_case__ ( lowerCamelCase__ : Accelerator , lowerCamelCase__ : DatasetDict , lowerCamelCase__ : List[int] , lowerCamelCase__ : List[int] , lowerCamelCase__ : int = 1_6 ) -> Tuple: A_ : str = AutoTokenizer.from_pretrained('''bert-base-cased''' ) A_ : Any = DatasetDict( { '''train''': dataset['''train'''].select(lowerCamelCase__ ), '''validation''': dataset['''train'''].select(lowerCamelCase__ ), '''test''': dataset['''validation'''], } ) def tokenize_function(lowerCamelCase__ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) A_ : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Optional[Any] = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCamelCase__ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : Optional[Any] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : int = 1_6 elif accelerator.mixed_precision != "no": A_ : str = 8 else: A_ : Tuple = None return tokenizer.pad( lowerCamelCase__ , padding='''longest''' , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
A_ : Union[str, Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) A_ : Optional[int] = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) A_ : Dict = DataLoader( tokenized_datasets['''test'''] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ ) return train_dataloader, eval_dataloader, test_dataloader def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int ) -> Optional[int]: # New Code # A_ : List[Any] = [] # Download the dataset A_ : Tuple = load_dataset('''glue''' , '''mrpc''' ) # Create our splits A_ : Tuple = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator A_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : Tuple = config['''lr'''] A_ : int = int(config['''num_epochs'''] ) A_ : Optional[int] = int(config['''seed'''] ) A_ : Tuple = int(config['''batch_size'''] ) A_ : Optional[int] = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation A_ : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A_ : int = batch_size // MAX_GPU_BATCH_SIZE A_ : Any = MAX_GPU_BATCH_SIZE set_seed(lowerCamelCase__ ) # New Code # # Create our folds: A_ : List[Any] = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] ) A_ : List[str] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(lowerCamelCase__ ): A_ ,A_ ,A_ : str = get_fold_dataloaders( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : int = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCamelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : Optional[int] = model.to(accelerator.device ) # Instantiate optimizer A_ : List[str] = AdamW(params=model.parameters() , lr=lowerCamelCase__ ) # Instantiate scheduler A_ : Optional[int] = get_linear_schedule_with_warmup( optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ ,A_ ,A_ ,A_ ,A_ : Optional[int] = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Now we train the model for epoch in range(lowerCamelCase__ ): model.train() for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) A_ : Tuple = model(**lowerCamelCase__ ) A_ : Optional[Any] = outputs.loss A_ : Optional[int] = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A_ : Optional[Any] = model(**lowerCamelCase__ ) A_ : List[Any] = outputs.logits.argmax(dim=-1 ) A_ ,A_ : Optional[int] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowerCamelCase__ , references=lowerCamelCase__ , ) A_ : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , lowerCamelCase__ ) # New Code # # We also run predictions on the test set at the very end A_ : Union[str, Any] = [] for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A_ : List[str] = model(**lowerCamelCase__ ) A_ : Any = outputs.logits A_ ,A_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(lowerCamelCase__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: A_ : Tuple = torch.cat(lowerCamelCase__ , dim=0 ) A_ : Dict = torch.stack(lowerCamelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) A_ : str = metric.compute(predictions=lowerCamelCase__ , references=lowerCamelCase__ ) accelerator.print('''Average test metrics from all folds:''' , lowerCamelCase__ ) def snake_case__ ( ) -> int: A_ : Union[str, Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) # New Code # parser.add_argument('''--num_folds''' , type=lowerCamelCase__ , default=3 , help='''The number of splits to perform across the dataset''' ) A_ : int = parser.parse_args() A_ : Dict = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(lowerCamelCase__ , lowerCamelCase__ ) if __name__ == "__main__": main()
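The final ensembling step above reduces to a few tensor operations; a hedged sketch (editor-added, with random stand-in logits):

import torch

fold_logits = [torch.randn(8, 2) for _ in range(3)]  # one test-set logit tensor per fold
averaged = torch.stack(fold_logits, dim=0).sum(dim=0).div(3)
predictions = averaged.argmax(dim=-1)                # vote by averaging the fold logits
print(predictions.shape)                             # torch.Size([8])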
4
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple 
docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : 
Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def snake_case__ ( ) -> Optional[int]: A_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase 
) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
4
1
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""", """Salesforce/blip-vqa-capfit-large""": ( """https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json""" ), """Salesforce/blip-image-captioning-base""": ( """https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json""" ), """Salesforce/blip-image-captioning-large""": ( """https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json""" ), """Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""", """Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""", """Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""", """Salesforce/blip-itm-large-flikr""": ( """https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'blip_text_model' def __init__( self : Optional[int] , _lowerCamelCase : List[str]=30524 , _lowerCamelCase : Optional[int]=768 , _lowerCamelCase : Union[str, Any]=768 , _lowerCamelCase : List[Any]=3072 , _lowerCamelCase : Optional[Any]=768 , _lowerCamelCase : Union[str, Any]=12 , _lowerCamelCase : Tuple=8 , _lowerCamelCase : Optional[Any]=512 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Optional[Any]=1E-12 , _lowerCamelCase : int=0.0 , _lowerCamelCase : str=0.0 , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=30522 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : int=0 , _lowerCamelCase : int=102 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : List[str]=True , **_lowerCamelCase : Any , ): """simple docstring""" super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , sep_token_id=_lowerCamelCase , **_lowerCamelCase , ) A_ : List[Any] = vocab_size A_ : Tuple = hidden_size A_ : List[Any] = encoder_hidden_size A_ : Union[str, Any] = intermediate_size A_ : Tuple = projection_dim A_ : Union[str, Any] = hidden_dropout_prob A_ : str = num_hidden_layers A_ : List[Any] = num_attention_heads A_ : str = max_position_embeddings A_ : List[str] = layer_norm_eps A_ : str = hidden_act A_ : List[str] = initializer_range A_ : Union[str, Any] = attention_probs_dropout_prob A_ : List[Any] = is_decoder A_ : Union[str, Any] = use_cache @classmethod def _a ( cls : Optional[Any] , _lowerCamelCase : Union[str, os.PathLike] , **_lowerCamelCase : str ): """simple docstring""" cls._set_token_in_kwargs(_lowerCamelCase ) A_ ,A_ : Dict = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase ) # get the text config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": A_ : str = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(_lowerCamelCase , **_lowerCamelCase ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'blip_vision_model' def __init__( self : List[str] , _lowerCamelCase : str=768 , _lowerCamelCase : List[str]=3072 , _lowerCamelCase : Any=512 , _lowerCamelCase : Dict=12 , _lowerCamelCase : Any=12 , _lowerCamelCase : Optional[Any]=384 , _lowerCamelCase : List[Any]=16 , _lowerCamelCase : Dict="gelu" , _lowerCamelCase : int=1E-5 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Any=1E-10 , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Union[str, Any] = hidden_size A_ : int = intermediate_size A_ : List[Any] = projection_dim A_ : Dict = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : Dict = patch_size A_ : Dict = image_size A_ : List[Any] = initializer_range A_ : int = attention_dropout A_ : Optional[Any] = layer_norm_eps A_ : Optional[int] = hidden_act @classmethod def _a ( cls : Dict , _lowerCamelCase : Union[str, os.PathLike] , **_lowerCamelCase : Tuple ): """simple docstring""" cls._set_token_in_kwargs(_lowerCamelCase ) A_ ,A_ : Dict = cls.get_config_dict(_lowerCamelCase , **_lowerCamelCase ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": A_ : Union[str, Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(_lowerCamelCase , **_lowerCamelCase ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'blip' _lowerCAmelCase = True def __init__( self : Union[str, Any] , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : Union[str, Any]=512 , _lowerCamelCase : Union[str, Any]=2.65_92 , _lowerCamelCase : int=256 , **_lowerCamelCase : int , ): """simple docstring""" super().__init__(**_lowerCamelCase ) if text_config is None: A_ : Union[str, Any] = {} logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' ) if vision_config is None: A_ : Optional[int] = {} logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' ) A_ : Union[str, Any] = BlipTextConfig(**_lowerCamelCase ) A_ : Optional[int] = BlipVisionConfig(**_lowerCamelCase ) A_ : List[str] = self.vision_config.hidden_size A_ : List[str] = projection_dim A_ : Union[str, Any] = logit_scale_init_value A_ : Tuple = 1.0 A_ : Optional[Any] = 0.02 A_ : Union[str, Any] = image_text_hidden_size @classmethod def _a ( cls : str , _lowerCamelCase : BlipTextConfig , _lowerCamelCase : BlipVisionConfig , **_lowerCamelCase : Tuple ): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_lowerCamelCase ) def _a ( self : str ): """simple docstring""" A_ : List[Any] = copy.deepcopy(self.__dict__ ) A_ : Optional[int] = self.text_config.to_dict() A_ : str = self.vision_config.to_dict() A_ : Optional[Any] = self.__class__.model_type return output
4
'''simple docstring''' import pprint import requests snake_case__ = """https://zenquotes.io/api""" def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def snake_case__ ( ) -> list: return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": snake_case__ = random_quotes() pprint.pprint(response)
4
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case__ = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = ["""PLBartTokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ """PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """PLBartForCausalLM""", """PLBartForConditionalGeneration""", """PLBartForSequenceClassification""", """PLBartModel""", """PLBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
4
'''simple docstring''' from __future__ import annotations class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[int] , _lowerCamelCase : int ): """simple docstring""" A_ : Union[str, Any] = order # a_{0} ... a_{k} A_ : Union[str, Any] = [1.0] + [0.0] * order # b_{0} ... b_{k} A_ : int = [1.0] + [0.0] * order # x[n-1] ... x[n-k] A_ : str = [0.0] * self.order # y[n-1] ... y[n-k] A_ : Optional[Any] = [0.0] * self.order def _a ( self : Dict , _lowerCamelCase : list[float] , _lowerCamelCase : list[float] ): """simple docstring""" if len(_lowerCamelCase ) < self.order: A_ : Any = [1.0, *a_coeffs] if len(_lowerCamelCase ) != self.order + 1: A_ : List[Any] = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) if len(_lowerCamelCase ) != self.order + 1: A_ : Union[str, Any] = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(_lowerCamelCase )}' ) raise ValueError(_lowerCamelCase ) A_ : Tuple = a_coeffs A_ : str = b_coeffs def _a ( self : Tuple , _lowerCamelCase : float ): """simple docstring""" A_ : Any = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) A_ : str = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] A_ : Optional[Any] = self.input_history[:-1] A_ : List[str] = self.output_history[:-1] A_ : Tuple = sample A_ : Tuple = result return result
4
1
'''simple docstring''' import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def snake_case__ ( lowerCamelCase__ : int = 8 ) -> str: A_ : List[Any] = ascii_letters + digits + punctuation return "".join(secrets.choice(lowerCamelCase__ ) for _ in range(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : int ) -> str: # Password Generator = full boot with random_number, random_letters, and # random_character FUNCTIONS # Put your code here... i -= len(lowerCamelCase__ ) A_ : Tuple = i // 3 A_ : Tuple = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) A_ : List[str] = ( chars_incl + random(lowerCamelCase__ , quotient + remainder ) + random(lowerCamelCase__ , lowerCamelCase__ ) + random(lowerCamelCase__ , lowerCamelCase__ ) ) A_ : int = list(lowerCamelCase__ ) shuffle(lowerCamelCase__ ) return "".join(lowerCamelCase__ ) # random is a generalised function for letters, characters and numbers def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : int ) -> str: return "".join(secrets.choice(lowerCamelCase__ ) for _ in range(lowerCamelCase__ ) ) def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] ) -> List[Any]: pass # Put your code here... def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] ) -> int: pass # Put your code here... def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] ) -> str: pass # Put your code here... def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : int = 8 ) -> bool: if len(lowerCamelCase__ ) < min_length: # Your password must be at least 8 characters long return False A_ : List[Any] = any(char in ascii_uppercase for char in password ) A_ : Tuple = any(char in ascii_lowercase for char in password ) A_ : Union[str, Any] = any(char in digits for char in password ) A_ : Dict = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowercase # numbers, and special characters def snake_case__ ( ) -> Optional[Any]: A_ : Optional[int] = int(input('''Please indicate the max length of your password: ''' ).strip() ) A_ : List[str] = input( '''Please indicate the characters that must be in your password: ''' ).strip() print('''Password generated:''' , password_generator(lowerCamelCase__ ) ) print( '''Alternative Password generated:''' , alternative_password_generator(lowerCamelCase__ , lowerCamelCase__ ) , ) print('''[If you are thinking of using this password, you had better save it.]''' ) if __name__ == "__main__": main()
4
'''simple docstring''' class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" A_ : Union[str, Any] = val A_ : Tuple = None A_ : Any = None def _a ( self : Tuple , _lowerCamelCase : List[Any] ): """simple docstring""" if self.val: if val < self.val: if self.left is None: A_ : int = Node(_lowerCamelCase ) else: self.left.insert(_lowerCamelCase ) elif val > self.val: if self.right is None: A_ : List[str] = Node(_lowerCamelCase ) else: self.right.insert(_lowerCamelCase ) else: A_ : Any = val def snake_case__ ( lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] ) -> str: # Recursive traversal if root: inorder(root.left , lowerCamelCase__ ) res.append(root.val ) inorder(root.right , lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[int] ) -> Tuple: # Build BST if len(lowerCamelCase__ ) == 0: return arr A_ : Dict = Node(arr[0] ) for i in range(1 , len(lowerCamelCase__ ) ): root.insert(arr[i] ) # Traverse BST in order. A_ : Tuple = [] inorder(lowerCamelCase__ , lowerCamelCase__ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
1
'''simple docstring''' import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) snake_case__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } snake_case__ = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) -> int: for attribute in key.split('''.''' ): A_ : List[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: A_ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: A_ : Union[str, Any] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": A_ : Tuple = value elif weight_type == "weight_g": A_ : int = value elif weight_type == "weight_v": A_ : Union[str, Any] = value elif weight_type == "bias": A_ : Any = value else: A_ : Any = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def snake_case__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] ) -> Optional[int]: A_ : str = [] A_ : int = fairseq_model.state_dict() A_ : Tuple = hf_model.feature_extractor # if encoder has a different dim than the decoder -> use proj_weight A_ : Optional[int] = None for name, value in fairseq_dict.items(): A_ : List[Any] = False if "conv_layers" in name: load_conv_layer( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) A_ : List[str] = True elif name.split('''.''' )[0] == "proj": A_ : List[str] = fairseq_model.proj A_ : Tuple = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: A_ : Optional[Any] = True if "*" in mapped_key: A_ : List[str] = name.split(lowerCamelCase__ )[0].split('''.''' )[-2] A_ : List[str] = mapped_key.replace('''*''' , lowerCamelCase__ ) if "weight_g" in name: A_ : Tuple = '''weight_g''' elif "weight_v" in name: A_ : List[Any] = '''weight_v''' elif "bias" in name: A_ : Union[str, Any] = '''bias''' elif "weight" in name: A_ : Dict = '''weight''' else: A_ : Any = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(f'Unused weights: {unused_weights}' ) return proj_weight def snake_case__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) -> Optional[int]: A_ : Union[str, Any] = full_name.split('''conv_layers.''' )[-1] A_ : Optional[Any] = name.split('''.''' ) A_ : List[str] = int(items[0] ) A_ : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) A_ : Optional[Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) A_ : Optional[Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) A_ : str = value logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) A_ : List[str] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Optional[int]: A_ ,A_ : Optional[Any] = emb.weight.shape A_ : Dict = nn.Linear(lowerCamelCase__ , lowerCamelCase__ , bias=lowerCamelCase__ ) A_ : List[str] = emb.weight.data return lin_layer def snake_case__ ( lowerCamelCase__ : Optional[Any] ) -> Dict: with open(lowerCamelCase__ , '''r''' , encoding='''utf-8''' ) as f: A_ : Optional[int] = f.readlines() A_ : str = [line.split(''' ''' )[0] for line in lines] A_ : Optional[Any] = len(lowerCamelCase__ ) A_ : Optional[Any] = { '''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3, } vocab_dict.update(dict(zip(lowerCamelCase__ , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def snake_case__ ( lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int , ) -> List[Any]: A_ : Union[str, Any] = WavaVecaConfig.from_pretrained(lowerCamelCase__ ) A_ : Optional[int] = SpeechaTextaConfig.from_pretrained( lowerCamelCase__ , vocab_size=lowerCamelCase__ , decoder_layers=lowerCamelCase__ , do_stable_layer_norm=lowerCamelCase__ ) A_ : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) A_ ,A_ ,A_ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) A_ : List[Any] = model[0].eval() # set weights for wav2vec2 encoder A_ : Optional[int] = WavaVecaModel(lowerCamelCase__ ) A_ : int = recursively_load_weights_wavaveca(model.encoder , lowerCamelCase__ ) A_ : List[str] = SpeechaTextaForCausalLM(lowerCamelCase__ ) A_ ,A_ : Tuple = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCamelCase__ ) # set output linear layer unexpected_keys.remove('''embed_out''' ) A_ : int = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' ) logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' ) A_ : Dict = SpeechEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ ) A_ : Union[str, Any] = False # add projection layer A_ : int = nn.Parameter(projection_layer.weight ) A_ : List[str] = nn.Parameter(projection_layer.bias ) A_ : Union[str, Any] = create_vocab_dict(lowerCamelCase__ ) with open(os.path.join(lowerCamelCase__ , '''vocab.json''' ) , '''w''' ) as fp: json.dump(lowerCamelCase__ , lowerCamelCase__ ) A_ : List[Any] = SpeechaTextaTokenizer(os.path.join(lowerCamelCase__ , '''vocab.json''' ) ) tokenizer.save_pretrained(lowerCamelCase__ ) A_ : List[str] = hf_wavavec.config.to_dict() A_ : Optional[Any] = tokenizer.pad_token_id A_ : int = tokenizer.bos_token_id A_ : Optional[Any] = tokenizer.eos_token_id A_ : str = '''speech_to_text_2''' A_ : Dict = '''wav2vec2''' A_ : Dict = SpeechEncoderDecoderConfig.from_dict(lowerCamelCase__ ) hf_wavavec.save_pretrained(lowerCamelCase__ ) feature_extractor.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") 
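# Descriptive note (added): the remaining flags locate the fairseq checkpoint and dict to convert, pick the HF encoder/decoder configs to pair them with, and set the decoder vocab size and depth.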
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument( """--encoder_config_path""", default="""facebook/wav2vec2-large-lv60""", type=str, help="""Path to hf encoder wav2vec2 checkpoint config""", ) parser.add_argument( """--decoder_config_path""", default="""facebook/s2t-small-mustc-en-fr-st""", type=str, help="""Path to hf decoder s2t checkpoint config""", ) parser.add_argument("""--vocab_size""", default=1_02_24, type=int, help="""Vocab size of decoder""") parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""") snake_case__ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
4
'''simple docstring''' def snake_case__ ( lowerCamelCase__ : list ) -> list: if len(lowerCamelCase__ ) <= 1: return [tuple(lowerCamelCase__ )] A_ : List[str] = [] def generate(lowerCamelCase__ : int , lowerCamelCase__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowerCamelCase__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A_ ,A_ : Optional[int] = arr[k - 1], arr[i] else: # k is odd A_ ,A_ : Union[str, Any] = arr[k - 1], arr[0] generate(k - 1 , lowerCamelCase__ ) generate(len(lowerCamelCase__ ) , lowerCamelCase__ ) return res if __name__ == "__main__": snake_case__ = input("""Enter numbers separated by a comma:\n""").strip() snake_case__ = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
4
1
'''simple docstring''' import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case__ = 16 snake_case__ = 32 def snake_case__ ( lowerCamelCase__ : Accelerator , lowerCamelCase__ : int = 1_6 ) -> List[Any]: A_ : int = AutoTokenizer.from_pretrained('''bert-base-cased''' ) A_ : List[Any] = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(lowerCamelCase__ : Any ): # max_length=None => use the model max length (it's actually the default) A_ : Any = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Dict = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCamelCase__ : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : Optional[int] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : str = 1_6 elif accelerator.mixed_precision != "no": A_ : str = 8 else: A_ : Dict = None return tokenizer.pad( lowerCamelCase__ , padding='''longest''' , max_length=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
A_ : List[str] = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ , drop_last=lowerCamelCase__ ) A_ : str = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=lowerCamelCase__ , drop_last=(accelerator.mixed_precision == '''fp8''') , ) return train_dataloader, eval_dataloader def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : int ) -> Optional[Any]: # Initialize accelerator A_ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : Optional[Any] = config['''lr'''] A_ : List[str] = int(config['''num_epochs'''] ) A_ : Optional[Any] = int(config['''seed'''] ) A_ : Optional[int] = int(config['''batch_size'''] ) A_ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big, we use gradient accumulation A_ : List[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A_ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE A_ : Any = MAX_GPU_BATCH_SIZE set_seed(lowerCamelCase__ ) A_ ,A_ : List[Any] = get_dataloaders(lowerCamelCase__ , lowerCamelCase__ ) # Instantiate the model (we build the model here so that the seed also controls new weights initialization) A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCamelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : int = model.to(accelerator.device ) # Instantiate optimizer A_ : List[Any] = AdamW(params=model.parameters() , lr=lowerCamelCase__ ) # Instantiate scheduler A_ : Tuple = get_linear_schedule_with_warmup( optimizer=lowerCamelCase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCamelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ ,A_ ,A_ ,A_ ,A_ : Optional[int] = accelerator.prepare( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Now we train the model for epoch in range(lowerCamelCase__ ): model.train() for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) A_ : List[str] = model(**lowerCamelCase__ ) A_ : Tuple = outputs.loss A_ : str = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): A_ : List[str] = model(**lowerCamelCase__ ) A_ : Dict = outputs.logits.argmax(dim=-1 ) A_ ,A_ : Tuple = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowerCamelCase__ , references=lowerCamelCase__ , ) A_ : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , lowerCamelCase__ ) def snake_case__ ( ) -> Optional[Any]: A_ : Tuple = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowerCamelCase__ , default=lowerCamelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose ''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10. ''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) A_ : int = parser.parse_args() A_ : Dict = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(lowerCamelCase__ , lowerCamelCase__ ) if __name__ == "__main__": main()
4
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Any = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : List[str] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: A_ : List[str] = TextStreamer(_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Dict = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[str] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Dict = -1 A_ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Optional[int] = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : str = tokenizer.decode(greedy_ids[0] ) A_ : int = TextIteratorStreamer(_lowerCamelCase ) A_ : List[Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[Any] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() A_ : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : List[str] = -1 A_ : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : Tuple = model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase ) A_ : Tuple = greedy_ids[:, input_ids.shape[1] :] A_ : Tuple = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: A_ : Any = TextStreamer(_lowerCamelCase , skip_prompt=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=10 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer A_ : Any = cs.out[:-1] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('''distilgpt2''' ) A_ : Tuple = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowerCamelCase ) A_ : List[Any] = -1 A_ : Union[str, Any] = torch.ones((1, 5) , device=_lowerCamelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: A_ : 
List[Any] = TextStreamer(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) model.generate(_lowerCamelCase , max_new_tokens=1 , do_sample=_lowerCamelCase , streamer=_lowerCamelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token A_ : List[str] = cs.out[:-1] # Remove the final "\n" A_ : List[Any] = tokenizer(_lowerCamelCase , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) A_ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowerCamelCase ) A_ : Union[str, Any] = -1 A_ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCamelCase ) A_ : List[str] = TextIteratorStreamer(_lowerCamelCase , timeout=0.0_01 ) A_ : str = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} A_ : List[str] = Thread(target=model.generate , kwargs=_lowerCamelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCamelCase ): A_ : str = '''''' for new_text in streamer: streamer_text += new_text
4
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging snake_case__ = logging.get_logger(__name__) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = ['pixel_values'] def __init__( self : int , _lowerCamelCase : bool = True , _lowerCamelCase : int = 32 , _lowerCamelCase : List[str]=PILImageResampling.BILINEAR , _lowerCamelCase : bool = True , **_lowerCamelCase : Union[str, Any] , ): """simple docstring""" A_ : Optional[Any] = do_resize A_ : List[str] = do_rescale A_ : Union[str, Any] = size_divisor A_ : Any = resample super().__init__(**_lowerCamelCase ) def _a ( self : Optional[int] , _lowerCamelCase : np.ndarray , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Optional[ChannelDimension] = None , **_lowerCamelCase : Optional[Any] ): """simple docstring""" A_ ,A_ : Tuple = get_image_size(_lowerCamelCase ) # Rounds the height and width down to the closest multiple of size_divisor A_ : Union[str, Any] = height // size_divisor * size_divisor A_ : Dict = width // size_divisor * size_divisor A_ : Any = resize(_lowerCamelCase , (new_h, new_w) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) return image def _a ( self : List[str] , _lowerCamelCase : np.ndarray , _lowerCamelCase : float , _lowerCamelCase : Optional[ChannelDimension] = None , **_lowerCamelCase : Optional[int] ): """simple docstring""" return rescale(image=_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def _a ( self : Optional[Any] , _lowerCamelCase : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : str=None , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : Optional[Union[TensorType, str]] = None , _lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **_lowerCamelCase : Any , ): """simple docstring""" A_ : Dict = do_resize if do_resize is not None else self.do_resize A_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale A_ : str = size_divisor if size_divisor is not None else self.size_divisor A_ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) A_ : Tuple = make_list_of_images(_lowerCamelCase ) if not valid_images(_lowerCamelCase ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. A_ : Dict = [to_numpy_array(_lowerCamelCase ) for img in images] if do_resize: A_ : Tuple = [self.resize(_lowerCamelCase , size_divisor=_lowerCamelCase , resample=_lowerCamelCase ) for image in images] if do_rescale: A_ : str = [self.rescale(_lowerCamelCase , scale=1 / 255 ) for image in images] A_ : List[str] = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images] A_ : Union[str, Any] = {'''pixel_values''': images} return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
4
'''simple docstring''' import heapq def snake_case__ ( lowerCamelCase__ : dict ) -> set[int]: A_ : list[list] = [] # for each node and its adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(lowerCamelCase__ , [-1 * len(lowerCamelCase__ ), (key, value)] ) # chosen_vertices = set of chosen vertices A_ : str = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices A_ : Tuple = heapq.heappop(lowerCamelCase__ )[1][0] chosen_vertices.add(lowerCamelCase__ ) # Remove all arcs adjacent to argmax for elem in queue: # if v has no adjacent nodes, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update its rank if argmax in elem[1][1]: A_ : List[str] = elem[1][1].index(lowerCamelCase__ ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(lowerCamelCase__ ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() snake_case__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
4
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class UpperCamelCase_ (SCREAMING_SNAKE_CASE_ ): """simple docstring""" _lowerCAmelCase = 'donut-swin' _lowerCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : str , _lowerCamelCase : str=224 , _lowerCamelCase : List[Any]=4 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : Union[str, Any]=96 , _lowerCamelCase : List[str]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : str=7 , _lowerCamelCase : Tuple=4.0 , _lowerCamelCase : Tuple=True , _lowerCamelCase : Any=0.0 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : Optional[Any]="gelu" , _lowerCamelCase : List[Any]=False , _lowerCamelCase : List[Any]=0.02 , _lowerCamelCase : List[str]=1E-5 , **_lowerCamelCase : Any , ): """simple docstring""" super().__init__(**__a ) A_ : Optional[Any] = image_size A_ : int = patch_size A_ : Optional[Any] = num_channels A_ : List[Any] = embed_dim A_ : List[Any] = depths A_ : Dict = len(__a ) A_ : Dict = num_heads A_ : Any = window_size A_ : Tuple = mlp_ratio A_ : Any = qkv_bias A_ : str = hidden_dropout_prob A_ : int = attention_probs_dropout_prob A_ : List[str] = drop_path_rate A_ : str = hidden_act A_ : Union[str, Any] = use_absolute_embeddings A_ : Optional[Any] = layer_norm_eps A_ : List[str] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A_ : int = int(embed_dim * 2 ** (len(__a ) - 1) )
350
'''simple docstring''' import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() snake_case__ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) snake_case__ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', 
F'decoder.layers.{i}.final_layer_norm.bias')) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""), ("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]: A_ : Tuple = state_dict.pop(lowerCamelCase__ ) A_ : Optional[Any] = val def snake_case__ ( lowerCamelCase__ : Dict ) -> Any: A_ : int = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A_ : int = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) A_ : List[str] = value else: A_ : Optional[int] = value return new_state_dict def snake_case__ ( lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]: A_ : Any = '''''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A_ : Tuple = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : str = in_proj_weight[:2_5_6, :] A_ : Optional[Any] = in_proj_bias[:2_5_6] A_ : Dict = in_proj_weight[2_5_6:5_1_2, :] A_ : Tuple = in_proj_bias[2_5_6:5_1_2] A_ : Tuple = in_proj_weight[-2_5_6:, :] A_ : Optional[int] = in_proj_bias[-2_5_6:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention A_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) A_ : Dict = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict A_ : List[str] = in_proj_weight[:2_5_6, :] A_ : int = in_proj_bias[:2_5_6] A_ : Any = in_proj_weight[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias[2_5_6:5_1_2] A_ : Union[str, Any] = in_proj_weight[-2_5_6:, :] A_ : Optional[Any] = in_proj_bias[-2_5_6:] # read in weights + bias of input projection layer of cross-attention A_ : Tuple = state_dict.pop( f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) A_ : Optional[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to 
the state dict A_ : Dict = in_proj_weight_cross_attn[:2_5_6, :] A_ : Tuple = in_proj_bias_cross_attn[:2_5_6] A_ : int = in_proj_weight_cross_attn[2_5_6:5_1_2, :] A_ : List[str] = in_proj_bias_cross_attn[2_5_6:5_1_2] A_ : Any = in_proj_weight_cross_attn[-2_5_6:, :] A_ : Any = in_proj_bias_cross_attn[-2_5_6:] def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple ) -> Dict: A_ ,A_ : int = image.size A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) A_ : Optional[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 A_ : Union[str, Any] = target_max_size / current_max_size A_ : Any = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def snake_case__ ( lowerCamelCase__ : Tuple ) -> str: A_ : Any = F.to_tensor(lowerCamelCase__ ) A_ : Optional[Any] = F.normalize(lowerCamelCase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ) return image @torch.no_grad() def snake_case__ ( lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) -> str: logger.info('''Converting model...''' ) # load original state dict A_ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='''cpu''' ) # rename keys for src, dest in rename_keys: rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) A_ : str = rename_backbone_keys(lowerCamelCase__ ) # query, key and value matrices need special treatment read_in_q_k_v(lowerCamelCase__ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A_ : List[Any] = '''model.''' for key in state_dict.copy().keys(): if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): A_ : List[Any] = state_dict.pop(lowerCamelCase__ ) A_ : str = val # create HuggingFace model and load state dict A_ : Union[str, Any] = TableTransformerConfig( backbone='''resnet18''' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: A_ : Dict = 1_5 A_ : Dict = 2 A_ : int = {0: '''table''', 1: '''table rotated'''} A_ : List[str] = idalabel A_ : Optional[int] = {v: k for k, v in idalabel.items()} else: A_ : Union[str, Any] = 1_2_5 A_ : Optional[Any] = 6 A_ : Optional[Any] = { 0: '''table''', 1: '''table column''', 2: '''table row''', 3: '''table column header''', 4: '''table projected row header''', 5: '''table spanning cell''', } A_ : int = idalabel A_ : Tuple = {v: k for k, v in idalabel.items()} A_ : Optional[Any] = DetrImageProcessor( format='''coco_detection''' , max_size=8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0 ) A_ : int = TableTransformerForObjectDetection(lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) model.eval() # verify our conversion A_ : Optional[int] = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png''' A_ : Union[str, Any] = hf_hub_download(repo_id='''nielsr/example-pdf''' , repo_type='''dataset''' , filename=lowerCamelCase__ ) A_ : Tuple = Image.open(lowerCamelCase__ ).convert('''RGB''' ) A_ : int = normalize(resize(lowerCamelCase__ , lowerCamelCase__ ) ).unsqueeze(0 ) A_ : str = model(lowerCamelCase__ ) if "detection" in checkpoint_url: A_ : str = (1, 1_5, 3) A_ : int = torch.tensor( [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] ) A_ : Tuple = 
torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] ) else: A_ : Optional[int] = (1, 1_2_5, 7) A_ : Dict = torch.tensor( [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] ) A_ : Any = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) image_processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: # Push model to HF hub logger.info('''Pushing model to the hub...''' ) A_ : List[Any] = ( '''microsoft/table-transformer-detection''' if '''detection''' in checkpoint_url else '''microsoft/table-transformer-structure-recognition''' ) model.push_to_hub(lowerCamelCase__ ) image_processor.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", type=str, choices=[ """https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""", """https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""", ], help="""URL of the Table Transformer checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) snake_case__ = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
4
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case__ = { """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = ["""AlbertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = ["""AlbertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ """ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """AlbertForMaskedLM""", """AlbertForMultipleChoice""", """AlbertForPreTraining""", """AlbertForQuestionAnswering""", """AlbertForSequenceClassification""", """AlbertForTokenClassification""", """AlbertModel""", """AlbertPreTrainedModel""", """load_tf_weights_in_albert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ """TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAlbertForMaskedLM""", """TFAlbertForMultipleChoice""", """TFAlbertForPreTraining""", """TFAlbertForQuestionAnswering""", """TFAlbertForSequenceClassification""", """TFAlbertForTokenClassification""", """TFAlbertMainLayer""", """TFAlbertModel""", """TFAlbertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ = [ """FlaxAlbertForMaskedLM""", """FlaxAlbertForMultipleChoice""", """FlaxAlbertForPreTraining""", """FlaxAlbertForQuestionAnswering""", """FlaxAlbertForSequenceClassification""", """FlaxAlbertForTokenClassification""", """FlaxAlbertModel""", """FlaxAlbertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( 
FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys snake_case__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
351
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) snake_case__ = logging.getLogger(__name__) @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None @dataclass(frozen=a__ ) class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if is_torch_available(): import torch from torch.utils.data import Dataset class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : List[Any]=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : int = os.path.join( _lowerCamelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(_lowerCamelCase ) , _lowerCamelCase , ) , ) A_ : Dict = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : List[str] = label_list[2], label_list[1] A_ : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
A_ : str = cached_features_file + '''.lock''' with FileLock(_lowerCamelCase ): if os.path.exists(_lowerCamelCase ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) A_ : List[str] = torch.load(_lowerCamelCase ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) A_ : Optional[int] = ( processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) ) logger.info('''Training examples: %s''' , len(_lowerCamelCase ) ) A_ : Optional[int] = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) logger.info('''Saving features into cached file %s''' , _lowerCamelCase ) torch.save(self.features , _lowerCamelCase ) def __len__( self : List[str] ): """simple docstring""" return len(self.features ) def __getitem__( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" return self.features[i] def _a ( self : str ): """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 def __init__( self : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = 128 , _lowerCamelCase : Dict=False , _lowerCamelCase : bool = False , ): """simple docstring""" A_ : Optional[int] = hans_processors[task]() A_ : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) A_ ,A_ : Union[str, Any] = label_list[2], label_list[1] A_ : Tuple = label_list A_ : Optional[int] = processor.get_dev_examples(_lowerCamelCase ) if evaluate else processor.get_train_examples(_lowerCamelCase ) A_ : Tuple = hans_convert_examples_to_features(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(_lowerCamelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) A_ : List[Any] = tf.data.Dataset.from_generator( _lowerCamelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _a ( self : Any ): """simple docstring""" return self.dataset def __len__( self : Dict ): """simple docstring""" return len(self.features ) def __getitem__( self : Optional[int] , _lowerCamelCase : List[str] ): """simple docstring""" return self.features[i] def _a ( self : Tuple ): """simple docstring""" return self.label_list class UpperCamelCase_ (a__ ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : Union[str, Any] ): """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def _a ( self : List[str] , _lowerCamelCase : Tuple ): """simple docstring""" return 
self._create_examples(self._read_tsv(os.path.join(_lowerCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def _a ( self : Any ): """simple docstring""" return ["contradiction", "entailment", "neutral"] def _a ( self : Optional[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ): """simple docstring""" A_ : Tuple = [] for i, line in enumerate(_lowerCamelCase ): if i == 0: continue A_ : str = '''%s-%s''' % (set_type, line[0]) A_ : Optional[Any] = line[5] A_ : Union[str, Any] = line[6] A_ : List[str] = line[7][2:] if line[7].startswith('''ex''' ) else line[7] A_ : str = line[0] examples.append(InputExample(guid=_lowerCamelCase , text_a=_lowerCamelCase , text_b=_lowerCamelCase , label=_lowerCamelCase , pairID=_lowerCamelCase ) ) return examples def snake_case__ ( lowerCamelCase__ : List[InputExample] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : PreTrainedTokenizer , ) -> int: A_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase__ )} A_ : Optional[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase__ ) , desc='''convert examples to features''' ): if ex_index % 1_0_0_0_0 == 0: logger.info('''Writing example %d''' % (ex_index) ) A_ : Optional[int] = tokenizer( example.text_a , example.text_b , add_special_tokens=lowerCamelCase__ , max_length=lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , ) A_ : List[str] = label_map[example.label] if example.label in label_map else 0 A_ : Tuple = int(example.pairID ) features.append(InputFeatures(**lowerCamelCase__ , label=lowerCamelCase__ , pairID=lowerCamelCase__ ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f'guid: {example}' ) logger.info(f'features: {features[i]}' ) return features snake_case__ = { """hans""": 3, } snake_case__ = { """hans""": HansProcessor, }
4
0
'''simple docstring'''


def perfect(number: int) -> bool:
    # A perfect number equals the sum of its proper divisors.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
    print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
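A quick usage sketch (my addition, not part of the original file) exercising the perfect() check restored above from its call site:

assert perfect(28)                      # 1 + 2 + 4 + 7 + 14 == 28
assert not perfect(27)                  # proper divisors 1, 3, 9 sum to 13
assert perfect(496) and perfect(8128)   # the next two perfect numbers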
352
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline snake_case__ = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCamelCase_ (datasets.BuilderConfig ): """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = "utf-8" _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = True # deprecated _lowerCAmelCase = None # deprecated _lowerCAmelCase = 1_0 << 2_0 # 10MB _lowerCAmelCase = None class UpperCamelCase_ (datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCAmelCase = JsonConfig def _a ( self : int ): """simple docstring""" if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) A_ : List[Any] = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def _a ( self : Any , _lowerCamelCase : List[str] ): """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) A_ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): A_ : Union[str, Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : List[str] = [files] A_ : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A_ : Tuple = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : int = [files] A_ : Union[str, Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) ) return splits def _a ( self : int , _lowerCamelCase : pa.Table ): """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ : Optional[int] = self.config.features.arrow_schema.field(_lowerCamelCase ).type A_ : Optional[int] = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema ) return pa_table def _a ( self : List[str] , _lowerCamelCase : int ): """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : int = json.load(_lowerCamelCase ) # We keep only the field we are interested in A_ : List[str] = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(_lowerCamelCase , 
(list, tuple) ): A_ : int = set().union(*[row.keys() for row in dataset] ) A_ : List[str] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} else: A_ : Tuple = dataset A_ : Dict = pa.Table.from_pydict(_lowerCamelCase ) yield file_idx, self._cast_table(_lowerCamelCase ) # If the file has one json object per line else: with open(_lowerCamelCase , '''rb''' ) as f: A_ : int = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ : int = max(self.config.chunksize // 32 , 16 << 10 ) A_ : int = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: A_ : Any = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(_lowerCamelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' ) try: while True: try: A_ : List[Any] = paj.read_json( io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(_lowerCamelCase , pa.ArrowInvalid ) and "straddling" not in str(_lowerCamelCase ) or block_size > len(_lowerCamelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'Batch of {len(_lowerCamelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( _lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ : Optional[Any] = json.load(_lowerCamelCase ) except json.JSONDecodeError: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON try: A_ : Optional[int] = set().union(*[row.keys() for row in dataset] ) A_ : Tuple = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys} A_ : int = pa.Table.from_pydict(_lowerCamelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError(f'Not able to read records in the JSON file at {file}.' ) from None yield file_idx, self._cast_table(_lowerCamelCase ) break else: logger.error(f'Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}' ) raise ValueError( f'Not able to read records in the JSON file at {file}. ' f'You should probably indicate the field of the JSON file containing your records. ' f'This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ' f'Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) batch_idx += 1
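A small numeric note (mine, not from the loader) on the adaptive block size used above: with the config's default 10MB chunksize, parsing starts at max(chunksize // 32, 16 << 10) bytes and doubles on every straddling-record failure.

chunksize = 10 << 20                      # default from the JsonConfig above
block_size = max(chunksize // 32, 16 << 10)
print(block_size)                         # 327680
print(block_size * 2, block_size * 4)     # 655360 1310720, the retry sizes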
4
0
snake_case__ = """Alexander Joslin""" import operator as op from .stack import Stack def snake_case__ ( lowerCamelCase__ : str ) -> int: A_ : Tuple = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub} A_ : int = Stack() A_ : Any = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(__lowerCAmelCase ) ) elif i in operators: # RULE 2 operator_stack.push(__lowerCAmelCase ) elif i == ")": # RULE 4 A_ : Tuple = operator_stack.peek() operator_stack.pop() A_ : int = operand_stack.peek() operand_stack.pop() A_ : Dict = operand_stack.peek() operand_stack.pop() A_ : List[str] = operators[opr](__lowerCAmelCase , __lowerCAmelCase ) operand_stack.push(__lowerCAmelCase ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": snake_case__ = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
353
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class UpperCamelCase_ (a__, a__ ): """simple docstring""" _lowerCAmelCase = 'swin' _lowerCAmelCase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , _lowerCamelCase : Optional[Any]=224 , _lowerCamelCase : List[str]=4 , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Tuple=96 , _lowerCamelCase : List[Any]=[2, 2, 6, 2] , _lowerCamelCase : List[str]=[3, 6, 12, 24] , _lowerCamelCase : List[Any]=7 , _lowerCamelCase : Optional[int]=4.0 , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Tuple=False , _lowerCamelCase : Dict=0.02 , _lowerCamelCase : Optional[Any]=1E-5 , _lowerCamelCase : Any=32 , _lowerCamelCase : Tuple=None , _lowerCamelCase : Any=None , **_lowerCamelCase : str , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : Optional[int] = image_size A_ : Optional[int] = patch_size A_ : Optional[int] = num_channels A_ : Any = embed_dim A_ : List[Any] = depths A_ : Any = len(_lowerCamelCase ) A_ : List[Any] = num_heads A_ : Tuple = window_size A_ : Tuple = mlp_ratio A_ : Dict = qkv_bias A_ : List[str] = hidden_dropout_prob A_ : List[str] = attention_probs_dropout_prob A_ : Any = drop_path_rate A_ : List[Any] = hidden_act A_ : Tuple = use_absolute_embeddings A_ : int = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : Union[str, Any] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model A_ : str = int(embed_dim * 2 ** (len(_lowerCamelCase ) - 1) ) A_ : str = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(_lowerCamelCase ) + 1 )] A_ ,A_ : Optional[Any] = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names ) class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : str ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return 1E-4
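A quick arithmetic check (mine) of the hidden_size set in the Swin config above, using the defaults embed_dim=96 and depths=[2, 2, 6, 2]:

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 768, the channel width after the last stage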
4
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class UpperCamelCase_ (lowercase__ ): """simple docstring""" _lowerCAmelCase : Optional[int] = '''data2vec-vision''' def __init__( self : List[Any] , _lowerCamelCase : Optional[Any]=768 , _lowerCamelCase : Optional[int]=12 , _lowerCamelCase : Any=12 , _lowerCamelCase : Dict=3072 , _lowerCamelCase : Tuple="gelu" , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : Any=0.02 , _lowerCamelCase : int=1E-12 , _lowerCamelCase : Union[str, Any]=224 , _lowerCamelCase : Optional[Any]=16 , _lowerCamelCase : int=3 , _lowerCamelCase : Dict=False , _lowerCamelCase : Tuple=False , _lowerCamelCase : List[str]=False , _lowerCamelCase : Tuple=False , _lowerCamelCase : List[str]=0.1 , _lowerCamelCase : int=0.1 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : str=[3, 5, 7, 11] , _lowerCamelCase : Any=[1, 2, 3, 6] , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Dict=0.4 , _lowerCamelCase : Optional[Any]=256 , _lowerCamelCase : List[str]=1 , _lowerCamelCase : Dict=False , _lowerCamelCase : int=255 , **_lowerCamelCase : List[str] , ): """simple docstring""" super().__init__(**_a ) A_ : str = hidden_size A_ : Optional[Any] = num_hidden_layers A_ : int = num_attention_heads A_ : int = intermediate_size A_ : List[Any] = hidden_act A_ : Union[str, Any] = hidden_dropout_prob A_ : Optional[int] = attention_probs_dropout_prob A_ : str = initializer_range A_ : Any = layer_norm_eps A_ : List[str] = image_size A_ : List[Any] = patch_size A_ : Optional[Any] = num_channels A_ : Tuple = use_mask_token A_ : List[str] = use_absolute_position_embeddings A_ : Tuple = use_relative_position_bias A_ : str = use_shared_relative_position_bias A_ : str = layer_scale_init_value A_ : Dict = drop_path_rate A_ : Union[str, Any] = use_mean_pooling # decode head attributes (semantic segmentation) A_ : str = out_indices A_ : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) A_ : Dict = use_auxiliary_head A_ : Optional[int] = auxiliary_loss_weight A_ : List[str] = auxiliary_channels A_ : Union[str, Any] = auxiliary_num_convs A_ : Optional[Any] = auxiliary_concat_input A_ : Union[str, Any] = semantic_loss_ignore_index class UpperCamelCase_ (lowercase__ ): """simple docstring""" _lowerCAmelCase : str = version.parse('1.11' ) @property def _a ( self : Optional[int] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _a ( self : Union[str, Any] ): """simple docstring""" return 1E-4
354
'''simple docstring'''
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{two_pointer([2, 7, 11, 15], 9) = }')
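A minimal usage sketch (mine; note the search assumes nums is sorted ascending):

nums = [2, 7, 11, 15]
print(two_pointer(nums, 9))    # [0, 1] since nums[0] + nums[1] == 9
print(two_pointer(nums, 100))  # [] because no pair sums to 100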
4
0
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_00_00_00
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0

    return number_chain


def solution(number: int = 1_0_0_0_0_0_0_0) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(F'{solution() = }')
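A short illustrative trace (mine) of next_number above; 44's chain reaches 1, so under this reconstruction chain(44) classifies it as a 1-ender:

n = 44
while n not in (1, 89):
    n = next_number(n)   # 44 -> 32 -> 13 -> 10 -> 1
print(n)  # 1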
355
'''simple docstring'''


def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
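A hand-checked example (mine) of the backtracking m-coloring above, on a triangle given as an adjacency matrix:

triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
print(color(triangle, 2))  # [] since a triangle is not 2-colorable
print(color(triangle, 3))  # [0, 1, 2], one colour per vertex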
4
0
'''simple docstring''' import os import jsonlines import numpy as np from tqdm import tqdm snake_case__ = 20_48 snake_case__ = 40_96 snake_case__ = 42 snake_case__ = os.environ.pop("""PROCESS_TRAIN""", """false""") snake_case__ = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4} def snake_case__ ( lowerCamelCase__ : List[Any] ) -> List[str]: def choose_first(lowerCamelCase__ : Any , lowerCamelCase__ : str=False ): assert isinstance(__A , __A ) if len(__A ) == 1: A_ : Optional[int] = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: A_ : Tuple = {k: [a[k]] for k in a} if len(a['''start_token'''] ) > 0: break return a A_ : Optional[Any] = {'''id''': example['''id''']} A_ : Optional[Any] = example['''annotations'''] A_ : Dict = annotation['''yes_no_answer'''] if 0 in yes_no_answer or 1 in yes_no_answer: A_ : str = ['''yes'''] if 1 in yes_no_answer else ['''no'''] A_ : Any = [] A_ : List[str] = [] A_ : Optional[Any] = ['''<cls>'''] else: A_ : List[Any] = ['''short'''] A_ : str = choose_first(annotation['''short_answers'''] ) if len(out['''start_token'''] ) == 0: # answer will be long if short is not available A_ : List[Any] = ['''long'''] A_ : Optional[Any] = choose_first(annotation['''long_answer'''] , is_long_answer=__A ) A_ : Optional[Any] = [] answer.update(__A ) # disregard some samples if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]: A_ : Any = True else: A_ : Optional[int] = False A_ : Tuple = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text'''] if not all(isinstance(answer[k] , __A ) for k in cols ): raise ValueError('''Issue in ID''' , example['''id'''] ) return answer def snake_case__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any=False ) -> str: A_ : List[str] = _get_single_answer(__A ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element A_ : Any = example['''document''']['''tokens'''] A_ : List[Any] = [] for i in range(len(doc['''token'''] ) ): if not doc["is_html"][i]: context.append(doc['''token'''][i] ) return { "context": " ".join(__A ), "answer": { "start_token": -1_0_0, # ignore index in cross-entropy "end_token": -1_0_0, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples A_ : List[Any] = ['''start_token''', '''end_token'''] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 A_ : Tuple = example['''document''']['''tokens'''] A_ : Union[str, Any] = answer['''start_token'''] A_ : Optional[Any] = answer['''end_token'''] A_ : Optional[Any] = [] for i in range(len(doc['''token'''] ) ): if not doc["is_html"][i]: context.append(doc['''token'''][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 A_ : Optional[int] = ''' '''.join(context[start_token:end_token] ) # checking above code if assertion: A_ : int = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']] A_ : List[Any] = doc['''token'''][answer['''start_token'''] : answer['''end_token''']] A_ : Optional[int] = ''' '''.join([old[i] for i in range(len(__A ) ) if not is_html[i]] ) if new != old: print('''ID:''' , example['''id'''] ) print('''New:''' , __A , end='''\n''' ) print('''Old:''' , __A , end='''\n\n''' ) return { "context": " ".join(__A ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def snake_case__ ( lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int]=2_0_4_8 , lowerCamelCase__ : Union[str, Any]=4_0_9_6 , lowerCamelCase__ : List[Any]=True ) -> Optional[Any]: A_ : List[str] = get_context_and_ans(__A , assertion=__A ) A_ : int = out['''answer'''] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } A_ : Dict = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids A_ : Tuple = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element A_ : List[Any] = [] A_ : Optional[Any] = [] A_ : List[Any] = input_ids[:q_len] A_ : int = range(__A , len(__A ) , max_length - doc_stride ) for i in doc_start_indices: A_ : Dict = i + max_length - q_len A_ : Union[str, Any] = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer['''category'''][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-1_0_0] * len(__A ), "end_token": [-1_0_0] * len(__A ), "category": category, }, } A_ : str = out['''context'''].split() A_ : Any = splitted_context[answer['''end_token''']] A_ : str = len( tokenizer( ''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=__A , ).input_ids ) A_ : Union[str, Any] = len( tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=__A ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token A_ : str = len(tokenizer(__A , add_special_tokens=__A ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 A_ : Dict = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive A_ : List[str] = answer['''start_token'''] A_ : Optional[Any] = answer['''end_token'''] if assertion: A_ : Optional[int] = tokenizer.decode(__A ) if answer["span"] != new: print('''ISSUE IN TOKENIZATION''' ) print('''OLD:''' , answer['''span'''] ) print('''NEW:''' , __A , end='''\n\n''' ) if len(__A ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": 
answer["category"], }, } A_ : Union[str, Any] = input_ids[:q_len] A_ : Optional[Any] = range(__A , len(__A ) , max_length - doc_stride ) A_ : str = [] A_ : Union[str, Any] = [] A_ : Any = [] A_ : List[str] = [] # null, yes, no, long, short for i in doc_start_indices: A_ : Optional[Any] = i + max_length - q_len A_ : Optional[Any] = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: A_ : Tuple = start_token - i + q_len A_ : Optional[int] = end_token - i + q_len answers_category.append(answer['''category'''][0] ) # ["short"] -> "short" else: A_ : Tuple = -1_0_0 A_ : Optional[int] = -1_0_0 answers_category.append('''null''' ) A_ : Optional[int] = inputs[-1][start_token : end_token + 1] answers_start_token.append(__A ) answers_end_token.append(__A ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print('''ISSUE in strided for ID:''' , example['''id'''] ) print('''New:''' , tokenizer.decode(__A ) ) print('''Old:''' , tokenizer.decode(__A ) , end='''\n\n''' ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def snake_case__ ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any]=2_0_4_8 , lowerCamelCase__ : Dict=4_0_9_6 , lowerCamelCase__ : Union[str, Any]=False ) -> Optional[Any]: A_ : Any = get_strided_contexts_and_ans( __A , __A , doc_stride=__A , max_length=__A , assertion=__A , ) return example def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Any ) -> Optional[Any]: with jsonlines.open(__A , '''a''' ) as writer: for example in tqdm(__A , total=len(__A ) , desc='''Saving samples ... ''' ): A_ : int = example['''labels'''] for ids, start, end, cat in zip( example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { '''input_ids''': ids, '''start_token''': start, '''end_token''': end, '''category''': CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer snake_case__ = load_dataset("""natural_questions""") snake_case__ = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""") snake_case__ = data["train" if PROCESS_TRAIN == "true" else "validation"] snake_case__ = { "tokenizer": tokenizer, "doc_stride": DOC_STRIDE, "max_length": MAX_LENGTH, "assertion": False, } snake_case__ = data.map(prepare_inputs, fn_kwargs=fn_kwargs) snake_case__ = data.remove_columns(["""annotations""", """document""", """id""", """question"""]) print(data) np.random.seed(SEED) snake_case__ = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl" save_to_disk(data, file_name=cache_file_name)
356
'''simple docstring'''
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new('''RGB''', (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 2_5_5 - cells[y][x] * 2_5_5
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("""out.gif""", save_all=True, append_images=images[1:])
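A tiny usage check (mine): the blinker oscillates with period two, so two generations return it to its start:

first = new_generation(BLINKER)   # [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
second = new_generation(first)
assert second == BLINKER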
4
0
'''simple docstring''' import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class UpperCamelCase_ (lowercase__ ): """simple docstring""" _lowerCAmelCase = 'Wav2Vec2FeatureExtractor' _lowerCAmelCase = 'AutoTokenizer' def __init__( self : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] ): """simple docstring""" super().__init__(_UpperCamelCase , _UpperCamelCase ) A_ : List[Any] = self.feature_extractor A_ : Dict = False @classmethod def _a ( cls : Optional[int] , _lowerCamelCase : str , **_lowerCamelCase : List[str] ): """simple docstring""" try: return super().from_pretrained(_UpperCamelCase , **_UpperCamelCase ) except OSError: warnings.warn( f'Loading a tokenizer inside {cls.__name__} from a config that does not' ''' include a `tokenizer_class` attribute is deprecated and will be ''' '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`''' ''' attribute to either your `config.json` or `tokenizer_config.json` ''' '''file to suppress this warning: ''' , _UpperCamelCase , ) A_ : Dict = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) A_ : Dict = WavaVecaCTCTokenizer.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) return cls(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase ) def __call__( self : Optional[Any] , *_lowerCamelCase : Any , **_lowerCamelCase : Tuple ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*_UpperCamelCase , **_UpperCamelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) A_ : Optional[Any] = kwargs.pop('''raw_speech''' ) else: A_ : Any = kwargs.pop('''audio''' , _UpperCamelCase ) A_ : str = kwargs.pop('''sampling_rate''' , _UpperCamelCase ) A_ : Optional[Any] = kwargs.pop('''text''' , _UpperCamelCase ) if len(_UpperCamelCase ) > 0: A_ : Tuple = args[0] A_ : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: A_ : Optional[Any] = self.feature_extractor(_UpperCamelCase , *_UpperCamelCase , sampling_rate=_UpperCamelCase , **_UpperCamelCase ) if text is not None: A_ : Any = self.tokenizer(_UpperCamelCase , **_UpperCamelCase ) if text is None: return inputs elif audio is None: return encodings else: A_ : str = encodings["""input_ids"""] return inputs def _a ( self : Dict , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : List[Any] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*_UpperCamelCase , **_UpperCamelCase ) A_ : Any = kwargs.pop('''input_features''' , _UpperCamelCase ) A_ : List[Any] = kwargs.pop('''labels''' , _UpperCamelCase ) if len(_UpperCamelCase ) > 0: A_ : Any = args[0] A_ : List[str] = args[1:] if input_features is not None: A_ : int = self.feature_extractor.pad(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ) if labels is not None: A_ : Optional[int] = self.tokenizer.pad(_UpperCamelCase , **_UpperCamelCase ) if labels is None: return input_features elif input_features is None: return labels else: A_ : List[Any] = labels["""input_ids"""] return input_features def _a ( self : List[Any] , *_lowerCamelCase : Any , **_lowerCamelCase : int ): """simple docstring""" return self.tokenizer.batch_decode(*_UpperCamelCase 
, **_UpperCamelCase ) def _a ( self : Optional[Any] , *_lowerCamelCase : Union[str, Any] , **_lowerCamelCase : Optional[int] ): """simple docstring""" return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase ) @contextmanager def _a ( self : List[str] ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) A_ : str = True A_ : Optional[int] = self.tokenizer yield A_ : Any = self.feature_extractor A_ : str = False
357
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Any = tempfile.mkdtemp() A_ : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) A_ : Tuple = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } A_ : List[Any] = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict , **_lowerCamelCase : Tuple ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[int] , **_lowerCamelCase : Optional[int] ): """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Optional[Any] , **_lowerCamelCase : Tuple ): """simple docstring""" return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def _a ( self : int ): """simple docstring""" A_ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ : Any = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _a ( self : int ): """simple docstring""" A_ : Tuple = self.get_tokenizer() A_ : Tuple = self.get_rust_tokenizer() A_ : Dict = self.get_image_processor() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ : str = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[str] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) A_ : Tuple = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) A_ : List[str] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.get_image_processor() A_ : Any = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Any = self.prepare_image_inputs() A_ : List[Any] = image_processor(_lowerCamelCase , return_tensors='''np''' ) A_ : str = processor(images=_lowerCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _a ( self : Dict ): """simple docstring""" A_ : str = self.get_image_processor() A_ : List[str] = self.get_tokenizer() A_ : Optional[int] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : int = '''lower newer''' A_ : str = processor(text=_lowerCamelCase ) A_ : Dict = tokenizer(_lowerCamelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _a ( self : str ): """simple docstring""" A_ : Optional[int] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : List[str] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = '''lower newer''' A_ : Optional[int] = self.prepare_image_inputs() A_ : List[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def _a ( self : List[str] ): """simple docstring""" A_ : Optional[Any] = self.get_image_processor() A_ : Optional[int] = self.get_tokenizer() A_ : List[Any] = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : str = processor.batch_decode(_lowerCamelCase ) A_ : Union[str, Any] = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : str = self.get_image_processor() A_ : Tuple = self.get_tokenizer() A_ : Any = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : str = '''lower newer''' A_ : List[str] = self.prepare_image_inputs() A_ : Tuple = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
4
0
'''simple docstring'''


def matching_min_vertex_cover(graph: dict) -> set:
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
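A hedged usage sketch (mine), using the graph from the trailing comment; whichever cover is returned, every edge should have an endpoint in it:

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = matching_min_vertex_cover(graph)
assert all(u in cover or v in cover for u, v in get_edges(graph))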
358
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = """▁""" snake_case__ = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", } snake_case__ = { """vocab_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json""" ), }, """spm_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model""" ) }, } snake_case__ = { """facebook/s2t-small-librispeech-asr""": 10_24, } snake_case__ = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""] snake_case__ = {"""mustc""": MUSTC_LANGS} class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = VOCAB_FILES_NAMES _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase = MAX_MODEL_INPUT_SIZES _lowerCAmelCase = ['input_ids', 'attention_mask'] _lowerCAmelCase = [] def __init__( self : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : str="<s>" , _lowerCamelCase : Union[str, Any]="</s>" , _lowerCamelCase : Dict="<pad>" , _lowerCamelCase : str="<unk>" , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : int=False , _lowerCamelCase : Any=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Dict[str, Any]] = None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" A_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) A_ : Optional[int] = do_upper_case A_ : Tuple = do_lower_case A_ : Tuple = load_json(_lowerCamelCase ) A_ : Tuple = {v: k for k, v in self.encoder.items()} A_ : List[Any] = spm_file A_ : List[str] = load_spm(_lowerCamelCase , self.sp_model_kwargs ) if lang_codes is not None: A_ : Any = lang_codes A_ : Optional[Any] = LANGUAGES[lang_codes] A_ : Optional[Any] = [f'<lang:{lang}>' for lang in self.langs] A_ : Union[str, Any] = {lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs} A_ : Optional[int] = self.lang_tokens A_ : int = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: A_ : Dict = {} @property def _a ( self : Tuple ): """simple docstring""" return len(self.encoder ) @property def _a ( self : int ): """simple docstring""" return self._tgt_lang @tgt_lang.setter def _a ( self : List[str] , _lowerCamelCase : Any ): """simple docstring""" A_ : int = new_tgt_lang self.set_tgt_lang_special_tokens(_lowerCamelCase ) def _a ( self : Tuple , _lowerCamelCase : str ): """simple docstring""" A_ : List[str] = self.lang_code_to_id[tgt_lang] A_ : Optional[Any] = [lang_code_id] def _a ( self : Optional[Any] , _lowerCamelCase : str ): """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def _a ( self : List[Any] , _lowerCamelCase : int ): """simple docstring""" return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] ) def 
_a ( self : int , _lowerCamelCase : int ): """simple docstring""" return self.decoder.get(_lowerCamelCase , self.unk_token ) def _a ( self : int , _lowerCamelCase : List[str] ): """simple docstring""" A_ : List[Any] = [] A_ : Any = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: A_ : Union[str, Any] = self.sp_model.decode(_lowerCamelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " A_ : Optional[Any] = [] else: current_sub_tokens.append(_lowerCamelCase ) A_ : Tuple = self.sp_model.decode(_lowerCamelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def _a ( self : int , _lowerCamelCase : Dict , _lowerCamelCase : Any=None ): """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def _a ( self : List[Any] , _lowerCamelCase : List[int] , _lowerCamelCase : Optional[List[int]] = None , _lowerCamelCase : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) A_ : Tuple = [1] * len(self.prefix_tokens ) A_ : Tuple = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def _a ( self : Dict ): """simple docstring""" A_ : Union[str, Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): """simple docstring""" A_ : Dict = self.__dict__.copy() A_ : List[Any] = None return state def __setstate__( self : List[str] , _lowerCamelCase : Dict ): """simple docstring""" A_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): A_ : Optional[int] = {} A_ : int = load_spm(self.spm_file , self.sp_model_kwargs ) def _a ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ): """simple docstring""" A_ : Dict = Path(_lowerCamelCase ) assert save_dir.is_dir(), f'{save_directory} should be a directory' A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) A_ : Optional[int] = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(_lowerCamelCase , '''wb''' ) as fi: A_ : List[str] = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (str(_lowerCamelCase ), str(_lowerCamelCase )) def snake_case__ ( lowerCamelCase__ : str , lowerCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: A_ : Tuple = sentencepiece.SentencePieceProcessor(**lowerCamelCase__ ) spm.Load(str(lowerCamelCase__ ) ) return spm def snake_case__ ( lowerCamelCase__ : str ) -> Union[Dict, List]: with open(lowerCamelCase__ , '''r''' ) as f: return json.load(lowerCamelCase__ ) def snake_case__ ( lowerCamelCase__ : Any , 
lowerCamelCase__ : str ) -> None: with open(lowerCamelCase__ , '''w''' ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=2 )
4
0
'''simple docstring'''
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''')
    elif electron_conc < 0:
        raise ValueError('''Electron concentration cannot be negative in a semiconductor''')
    elif hole_conc < 0:
        raise ValueError('''Hole concentration cannot be negative in a semiconductor''')
    elif intrinsic_conc < 0:
        raise ValueError(
            '''Intrinsic concentration cannot be negative in a semiconductor'''
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
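A brief check (my example) of the mass-action relation n * p = n_i**2 that the function above encodes:

print(carrier_concentration(electron_conc=0, hole_conc=20, intrinsic_conc=10))
# ('electron_conc', 5.0) since n = n_i**2 / p = 100 / 20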
359
'''simple docstring''' import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 snake_case__ = sys.version_info >= (3, 10) def snake_case__ ( lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : str=None ) -> List[Any]: return field(default_factory=lambda: default , metadata=lowerCamelCase__ ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 _lowerCAmelCase = 42 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 4_2 _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'titi' _lowerCAmelCase = 'toto' _lowerCAmelCase = 4_2 @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Optional[Any] ): """simple docstring""" A_ : Optional[int] = BasicEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" def _a ( self : Tuple ): """simple docstring""" A_ : Optional[Any] = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[1, 2, 3] ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) _lowerCAmelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = field() _lowerCAmelCase = field() _lowerCAmelCase = field() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = 42 _lowerCAmelCase = field() _lowerCAmelCase = None _lowerCAmelCase = field(default='toto', metadata={'help': 'help message'} ) _lowerCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = False _lowerCAmelCase = True _lowerCAmelCase = None @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = None _lowerCAmelCase = field(default=a__, metadata={'help': 'help message'} ) _lowerCAmelCase = None _lowerCAmelCase = list_field(default=[] ) _lowerCAmelCase = list_field(default=[] ) class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] , _lowerCamelCase : argparse.ArgumentParser , _lowerCamelCase : argparse.ArgumentParser ): """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): A_ : Union[str, 
Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} A_ : Optional[Any] = {k: v for k, v in vars(_lowerCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , _lowerCamelCase ) and yy.get('''choices''' , _lowerCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](_lowerCamelCase ) , yy['''type'''](_lowerCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--bar''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--baz''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--flag''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((A_) ,) : List[str] = parser.parse_args_into_dataclasses(_lowerCamelCase , look_for_args_file=_lowerCamelCase ) self.assertFalse(example.flag ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : int = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=42 , type=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : Any = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=_lowerCamelCase , default=_lowerCamelCase , const=_lowerCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=_lowerCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) A_ : Dict = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : Any = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Optional[int] = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : Union[str, Any] = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[str] = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) A_ : List[Any] = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', 
'''--opt''', '''False'''] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , baz=_lowerCamelCase , opt=_lowerCamelCase ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : str = HfArgumentParser(_lowerCamelCase ) A_ : Optional[int] = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : str = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[Any] = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) A_ : int = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : Dict = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) A_ : Tuple = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) A_ : List[str] = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def _a ( self : Optional[int] ): """simple docstring""" @dataclass class UpperCamelCase_ : """simple docstring""" _lowerCAmelCase = "toto" A_ : List[str] = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Tuple = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) A_ : List[str] = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) A_ : int = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 42 ) def _a ( self : Dict ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=_lowerCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = parser.parse_args([] ) self.assertEqual( _lowerCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) A_ : str = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def _a ( self : Dict ): """simple docstring""" A_ : Optional[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--bar''' , default=_lowerCamelCase , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=_lowerCamelCase , type=_lowerCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=_lowerCamelCase ) A_ : Tuple = [OptionalExample] if 
is_python_no_less_than_3_10: dataclass_types.append(_lowerCamelCase ) for dataclass_type in dataclass_types: A_ : int = HfArgumentParser(_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) A_ : List[Any] = parser.parse_args([] ) self.assertEqual(_lowerCamelCase , Namespace(foo=_lowerCamelCase , bar=_lowerCamelCase , baz=_lowerCamelCase , ces=[] , des=[] ) ) A_ : Optional[Any] = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(_lowerCamelCase , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def _a ( self : List[Any] ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Dict = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument('''--required_str''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[Any] = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=_lowerCamelCase , required=_lowerCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=_lowerCamelCase , ) expected.add_argument('''--opt''' , type=_lowerCamelCase , default=_lowerCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=_lowerCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=_lowerCamelCase ) self.argparsersEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = HfArgumentParser(_lowerCamelCase ) A_ : Union[str, Any] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } A_ : Optional[int] = parser.parse_dict(_lowerCamelCase )[0] A_ : str = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : List[str] ): """simple docstring""" A_ : Any = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, '''extra''': 42, } self.assertRaises(_lowerCamelCase , parser.parse_dict , _lowerCamelCase , allow_extra_keys=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : Union[str, Any] = HfArgumentParser(_lowerCamelCase ) A_ : List[str] = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : Tuple = os.path.join(_lowerCamelCase , '''temp_json''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(_lowerCamelCase , _lowerCamelCase ) A_ : List[str] = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] A_ : Optional[Any] = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : int = HfArgumentParser(_lowerCamelCase ) A_ : Tuple = { '''foo''': 12, '''bar''': 3.14, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: A_ : int = 
os.path.join(_lowerCamelCase , '''temp_yaml''' ) os.mkdir(_lowerCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[Any] = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] A_ : int = BasicExample(**_lowerCamelCase ) self.assertEqual(_lowerCamelCase , _lowerCamelCase ) def _a ( self : Union[str, Any] ): """simple docstring""" A_ : Dict = HfArgumentParser(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase )
4
0
'''simple docstring''' import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class UpperCamelCase_ (__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = VQModel _lowerCAmelCase = 'sample' @property def _a ( self : List[Any] , _lowerCamelCase : Optional[int]=(32, 32) ): """simple docstring""" A_ : str = 4 A_ : Any = 3 A_ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(_a ) return {"sample": image} @property def _a ( self : Tuple ): """simple docstring""" return (3, 32, 32) @property def _a ( self : int ): """simple docstring""" return (3, 32, 32) def _a ( self : Any ): """simple docstring""" A_ : Union[str, Any] = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 3, } A_ : Dict = self.dummy_input return init_dict, inputs_dict def _a ( self : Optional[int] ): """simple docstring""" pass def _a ( self : Union[str, Any] ): """simple docstring""" pass def _a ( self : Tuple ): """simple docstring""" A_ ,A_ : Optional[int] = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=_a ) self.assertIsNotNone(_a ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_a ) A_ : Optional[Any] = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def _a ( self : List[Any] ): """simple docstring""" A_ : Any = VQModel.from_pretrained('''fusing/vqgan-dummy''' ) model.to(_a ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) A_ : Dict = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) A_ : List[str] = image.to(_a ) with torch.no_grad(): A_ : Optional[int] = model(_a ).sample A_ : Union[str, Any] = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off A_ : int = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] ) # fmt: on self.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )
360
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 snake_case__ = get_tests_dir("""fixtures""") class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def _a ( self : List[str] ): """simple docstring""" A_ : List[Any] = mock.Mock() A_ : List[str] = 500 A_ : Tuple = {} A_ : int = HTTPError A_ : Optional[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowerCamelCase ) as mock_head: A_ : List[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def _a ( self : Tuple ): """simple docstring""" A_ : Tuple = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def _a ( self : Dict ): """simple docstring""" with self.assertRaises(_lowerCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Any = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) A_ : Tuple = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowerCamelCase ) @is_staging_test class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @classmethod def _a ( cls : Tuple ): """simple docstring""" A_ : int = TOKEN HfFolder.save_token(_lowerCamelCase ) @classmethod def _a ( cls : str ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def _a ( self : List[Any] ): """simple docstring""" A_ : Dict = ViTImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) A_ : Optional[int] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''test-image-processor''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : List[Any] = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : int = ViTImageProcessor.from_pretrained(_lowerCamelCase ) 
image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) A_ : List[str] = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowerCamelCase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowerCamelCase , use_auth_token=self._token ) A_ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) ) def _a ( self : Optional[Any] ): """simple docstring""" CustomImageProcessor.register_for_auto_class() A_ : Any = CustomImageProcessor.from_pretrained(_lowerCamelCase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) A_ : str = AutoImageProcessor.from_pretrained( f'{USER}/test-dynamic-image-processor' , trust_remote_code=_lowerCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
4
0
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Optional[int]=3 , _lowerCamelCase : List[str]=10 , _lowerCamelCase : Any=[10, 20, 30, 40] , _lowerCamelCase : Optional[int]=[1, 1, 2, 1] , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : str=3 , _lowerCamelCase : Union[str, Any]=None , ): """simple docstring""" A_ : List[str] = parent A_ : Tuple = batch_size A_ : Optional[Any] = image_size A_ : Dict = num_channels A_ : List[Any] = embeddings_size A_ : Any = hidden_sizes A_ : Dict = depths A_ : Union[str, Any] = is_training A_ : str = use_labels A_ : Optional[int] = hidden_act A_ : Optional[Any] = num_labels A_ : Tuple = scope A_ : Optional[Any] = len(_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : List[str] = self.get_config() return config, pixel_values def _a ( self : List[Any] ): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def _a ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : Dict ): """simple docstring""" A_ : Union[str, Any] = FlaxRegNetModel(config=_lowerCamelCase ) A_ : str = model(_lowerCamelCase ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple ): """simple docstring""" A_ : Tuple = self.num_labels A_ : Optional[Any] = FlaxRegNetForImageClassification(config=_lowerCamelCase ) A_ : List[Any] = model(_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : List[Any] ): """simple docstring""" A_ : int = self.prepare_config_and_inputs() A_ : Tuple = config_and_inputs A_ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = FlaxRegNetModelTester(self ) A_ : List[str] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Any ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[str] ): """simple docstring""" return def _a ( self : Union[str, Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : int ): """simple docstring""" A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _a ( self : Any ): """simple docstring""" pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _a ( self : str ): """simple docstring""" pass def _a ( self : Tuple ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[int] = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Tuple = ['pixel_values'] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[int] ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple ): A_ : str = model_class(_lowerCamelCase ) A_ : Optional[int] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[str] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[int] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : Tuple = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _a ( self : Dict ): """simple docstring""" A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A_ : Tuple = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) A_ : Optional[int] = model_class(_lowerCamelCase ) @jax.jit def model_jitted(_lowerCamelCase : int , **_lowerCamelCase : str ): return model(pixel_values=_lowerCamelCase , **_lowerCamelCase ) with self.subTest('''JIT Enabled''' ): A_ : str = model_jitted(**_lowerCamelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): A_ : Optional[int] = model_jitted(**_lowerCamelCase ).to_tuple() self.assertEqual(len(_lowerCamelCase ) , len(_lowerCamelCase ) ) for jitted_output, output in zip(_lowerCamelCase , _lowerCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def snake_case__ ( ) -> Dict: A_ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : Optional[Any] ): """simple 
docstring""" return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def _a ( self : Dict ): """simple docstring""" A_ : int = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) A_ : Union[str, Any] = self.default_image_processor A_ : Dict = prepare_img() A_ : Dict = image_processor(images=_lowerCamelCase , return_tensors='''np''' ) A_ : List[str] = model(**_lowerCamelCase ) # verify the logits A_ : Optional[int] = (1, 1000) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) A_ : Optional[Any] = jnp.array([-0.41_80, -1.50_51, -3.48_36] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
361
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) _lowerCAmelCase = 'CIDAS/clipseg-rd64-refined' _lowerCAmelCase = 'image_segmenter' _lowerCAmelCase = CLIPSegForImageSegmentation _lowerCAmelCase = ['image', 'text'] _lowerCAmelCase = ['image'] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[int] , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*_lowerCamelCase , **_lowerCamelCase ) def _a ( self : List[str] , _lowerCamelCase : "Image" , _lowerCamelCase : str ): """simple docstring""" return self.pre_processor(text=[label] , images=[image] , padding=_lowerCamelCase , return_tensors='''pt''' ) def _a ( self : Union[str, Any] , _lowerCamelCase : Optional[int] ): """simple docstring""" with torch.no_grad(): A_ : Optional[int] = self.model(**_lowerCamelCase ).logits return logits def _a ( self : List[str] , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : int = outputs.cpu().detach().numpy() A_ : Tuple = 0 A_ : List[str] = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
4
0
'''simple docstring''' from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class UpperCamelCase_ (__lowercase ): """simple docstring""" _lowerCAmelCase = ['''image_processor'''] _lowerCAmelCase = '''SamImageProcessor''' def __init__( self : List[str] , _lowerCamelCase : Any ): """simple docstring""" super().__init__(_lowerCamelCase ) A_ : Any = self.image_processor A_ : List[str] = -10 A_ : Union[str, Any] = self.image_processor.size["""longest_edge"""] def __call__( self : Union[str, Any] , _lowerCamelCase : str=None , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Optional[Union[str, TensorType]] = None , **_lowerCamelCase : Optional[int] , ): """simple docstring""" A_ : Union[str, Any] = self.image_processor( _lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase , ) # pop arguments that are not used in the foward but used nevertheless A_ : Optional[int] = encoding_image_processor["""original_sizes"""] if hasattr(_lowerCamelCase , '''numpy''' ): # Checks if Torch or TF tensor A_ : int = original_sizes.numpy() A_ : Union[str, Any] = self._check_and_preprocess_points( input_points=_lowerCamelCase , input_labels=_lowerCamelCase , input_boxes=_lowerCamelCase , ) A_ : Optional[Any] = self._normalize_and_convert( _lowerCamelCase , _lowerCamelCase , input_points=_lowerCamelCase , input_labels=_lowerCamelCase , input_boxes=_lowerCamelCase , return_tensors=_lowerCamelCase , ) return encoding_image_processor def _a ( self : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Any=None , _lowerCamelCase : Any="pt" , ): """simple docstring""" if input_points is not None: if len(_lowerCamelCase ) != len(_lowerCamelCase ): A_ : Union[str, Any] = [ self._normalize_coordinates(self.target_size , _lowerCamelCase , original_sizes[0] ) for point in input_points ] else: A_ : Optional[int] = [ self._normalize_coordinates(self.target_size , _lowerCamelCase , _lowerCamelCase ) for point, original_size in zip(_lowerCamelCase , _lowerCamelCase ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: A_ : int = self._pad_points_and_labels(_lowerCamelCase , _lowerCamelCase ) A_ : Union[str, Any] = np.array(_lowerCamelCase ) if input_labels is not None: A_ : str = np.array(_lowerCamelCase ) if input_boxes is not None: if len(_lowerCamelCase ) != len(_lowerCamelCase ): A_ : List[Any] = [ self._normalize_coordinates(self.target_size , _lowerCamelCase , original_sizes[0] , is_bounding_box=_lowerCamelCase ) for box in input_boxes ] else: A_ : str = [ self._normalize_coordinates(self.target_size , _lowerCamelCase , _lowerCamelCase , is_bounding_box=_lowerCamelCase ) for box, original_size in zip(_lowerCamelCase , _lowerCamelCase ) ] A_ : Any = np.array(_lowerCamelCase ) if input_boxes is not None: if return_tensors == "pt": A_ : List[str] = torch.from_numpy(_lowerCamelCase ) # boxes batch size of 1 by default A_ : List[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": A_ : List[Any] = 
tf.convert_to_tensor(_lowerCamelCase ) # boxes batch size of 1 by default A_ : str = tf.expand_dims(_lowerCamelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({'''input_boxes''': input_boxes} ) if input_points is not None: if return_tensors == "pt": A_ : Tuple = torch.from_numpy(_lowerCamelCase ) # point batch size of 1 by default A_ : str = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": A_ : List[Any] = tf.convert_to_tensor(_lowerCamelCase ) # point batch size of 1 by default A_ : int = tf.expand_dims(_lowerCamelCase , 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({'''input_points''': input_points} ) if input_labels is not None: if return_tensors == "pt": A_ : Dict = torch.from_numpy(_lowerCamelCase ) # point batch size of 1 by default A_ : int = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": A_ : List[Any] = tf.convert_to_tensor(_lowerCamelCase ) # point batch size of 1 by default A_ : Optional[int] = tf.expand_dims(_lowerCamelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({'''input_labels''': input_labels} ) return encoding_image_processor def _a ( self : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : int ): """simple docstring""" A_ : Dict = max([point.shape[0] for point in input_points] ) A_ : Optional[int] = [] for i, point in enumerate(_lowerCamelCase ): if point.shape[0] != expected_nb_points: A_ : Dict = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 ) A_ : int = np.append(input_labels[i] , [self.point_pad_value] ) processed_input_points.append(_lowerCamelCase ) A_ : List[Any] = processed_input_points return input_points, input_labels def _a ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : np.ndarray , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any]=False ): """simple docstring""" A_ : Optional[Any] = original_size A_ : Dict = self.image_processor._get_preprocess_shape(_lowerCamelCase , longest_edge=_lowerCamelCase ) A_ : Dict = deepcopy(_lowerCamelCase ).astype(_lowerCamelCase ) if is_bounding_box: A_ : int = coords.reshape(-1 , 2 , 2 ) A_ : List[Any] = coords[..., 0] * (new_w / old_w) A_ : Optional[Any] = coords[..., 1] * (new_h / old_h) if is_bounding_box: A_ : str = coords.reshape(-1 , 4 ) return coords def _a ( self : str , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : Optional[int]=None , ): """simple docstring""" if input_points is not None: if hasattr(_lowerCamelCase , '''numpy''' ): # Checks for TF or Torch tensor A_ : Tuple = input_points.numpy().tolist() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(input_points[0] , _lowerCamelCase ): raise ValueError('''Input points must be a list of list of floating points.''' ) A_ : List[Any] = [np.array(_lowerCamelCase ) for input_point in input_points] else: A_ : Optional[int] = None if input_labels is not None: if hasattr(_lowerCamelCase , '''numpy''' ): A_ : Any = input_labels.numpy().tolist() if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(input_labels[0] , _lowerCamelCase ): raise ValueError('''Input labels must be a list of list integers.''' ) A_ : Dict = [np.array(_lowerCamelCase ) for label in input_labels] else: A_ : List[Any] = None if input_boxes is not None: if hasattr(_lowerCamelCase 
, '''numpy''' ): A_ : Tuple = input_boxes.numpy().tolist() if ( not isinstance(_lowerCamelCase , _lowerCamelCase ) or not isinstance(input_boxes[0] , _lowerCamelCase ) or not isinstance(input_boxes[0][0] , _lowerCamelCase ) ): raise ValueError('''Input boxes must be a list of list of list of floating points.''' ) A_ : int = [np.array(_lowerCamelCase ).astype(np.floataa ) for box in input_boxes] else: A_ : List[str] = None return input_points, input_labels, input_boxes @property def _a ( self : Optional[Any] ): """simple docstring""" A_ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(_lowerCamelCase ) ) def _a ( self : Any , *_lowerCamelCase : List[Any] , **_lowerCamelCase : Any ): """simple docstring""" return self.image_processor.post_process_masks(*_lowerCamelCase , **_lowerCamelCase )
362
'''simple docstring''' from collections.abc import Sequence def snake_case__ ( lowerCamelCase__ : Sequence[float] , lowerCamelCase__ : bool = False ) -> float: if not arr: return 0 A_ : Union[str, Any] = 0 if allow_empty_subarrays else float('''-inf''' ) A_ : str = 0.0 for num in arr: A_ : Any = max(0 if allow_empty_subarrays else num , curr_sum + num ) A_ : Tuple = max(lowerCamelCase__ , lowerCamelCase__ ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() snake_case__ = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'{max_subarray_sum(nums) = }')
4
0
'''simple docstring''' import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) snake_case__ = { """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""", """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""", """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""", """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""", """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""", """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""", """mask_downscaling.0""": """mask_embed.conv1""", """mask_downscaling.1""": """mask_embed.layer_norm1""", """mask_downscaling.3""": """mask_embed.conv2""", """mask_downscaling.4""": """mask_embed.layer_norm2""", """mask_downscaling.6""": """mask_embed.conv3""", """point_embeddings""": """point_embed""", """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""", """image_encoder""": """vision_encoder""", """neck.0""": """neck.conv1""", """neck.1""": """neck.layer_norm1""", """neck.2""": """neck.conv2""", """neck.3""": """neck.layer_norm2""", """patch_embed.proj""": """patch_embed.projection""", """.norm""": """.layer_norm""", """blocks""": """layers""", } def snake_case__ ( lowerCamelCase__ : Any ) -> Optional[Any]: A_ : Any = {} state_dict.pop('''pixel_mean''' , a_ ) state_dict.pop('''pixel_std''' , a_ ) A_ : Optional[int] = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: A_ : Optional[Any] = key.replace(a_ , a_ ) if re.match(a_ , a_ ): A_ : int = int(re.match(a_ , a_ ).group(2 ) ) if layer_nb == 0: A_ : Dict = key.replace('''layers.0''' , '''proj_in''' ) elif layer_nb == 1: A_ : List[Any] = key.replace('''layers.1''' , '''layers.0''' ) elif layer_nb == 2: A_ : Union[str, Any] = key.replace('''layers.2''' , '''proj_out''' ) A_ : List[Any] = value A_ : Any = model_state_dict[ '''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def snake_case__ ( lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any="ybelkada/segment-anything" ) -> Union[str, Any]: A_ : int = hf_hub_download(a_ , f'checkpoints/{model_name}.pth' ) if "sam_vit_b" in model_name: A_ : int = SamConfig() elif "sam_vit_l" in model_name: A_ : Optional[int] = SamVisionConfig( hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , ) A_ : Any = SamConfig( vision_config=a_ , ) elif "sam_vit_h" in model_name: A_ : List[str] = SamVisionConfig( hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , ) A_ : Optional[Any] = SamConfig( vision_config=a_ , ) A_ : Optional[Any] = torch.load(a_ , map_location='''cpu''' ) A_ : Dict = replace_keys(a_ ) A_ : Dict = SamImageProcessor() A_ : Union[str, Any] = SamProcessor(image_processor=a_ ) A_ : int = SamModel(a_ ) hf_model.load_state_dict(a_ ) A_ : List[str] = hf_model.to('''cuda''' ) A_ : Any = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' A_ : Any = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('''RGB''' ) A_ : Optional[int] = [[[4_0_0, 6_5_0]]] A_ : Optional[int] = [[1]] A_ : List[Any] 
= processor(images=np.array(a_ ) , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A_ : int = hf_model(**a_ ) A_ : str = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579890251159668 A_ : List[str] = processor( images=np.array(a_ ) , input_points=a_ , input_labels=a_ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A_ : Tuple = hf_model(**a_ ) A_ : List[str] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712603092193604 A_ : Union[str, Any] = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),) A_ : Union[str, Any] = processor(images=np.array(a_ ) , input_boxes=a_ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A_ : int = hf_model(**a_ ) A_ : Optional[Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686015605926514 # Test with 2 points and 1 image. A_ : str = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]] A_ : Optional[Any] = [[1, 1]] A_ : Optional[int] = processor( images=np.array(a_ ) , input_points=a_ , input_labels=a_ , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): A_ : List[Any] = hf_model(**a_ ) A_ : Any = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936047792434692 if __name__ == "__main__": snake_case__ = argparse.ArgumentParser() snake_case__ = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""] parser.add_argument( """--model_name""", default="""sam_vit_h_4b8939""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) parser.add_argument( """--model_hub_id""", default="""ybelkada/segment-anything""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) snake_case__ = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
363
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ = logging.get_logger(__name__) snake_case__ = { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2 } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'speech_to_text_2' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = {'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Optional[Any] , _lowerCamelCase : Optional[Any]=10000 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : int=2048 , _lowerCamelCase : Dict=4 , _lowerCamelCase : str=0.0 , _lowerCamelCase : int=True , _lowerCamelCase : int="relu" , _lowerCamelCase : Any=256 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : Optional[Any]=0.02 , _lowerCamelCase : int=2 , _lowerCamelCase : List[str]=True , _lowerCamelCase : str=1 , _lowerCamelCase : List[Any]=0 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : Tuple=1024 , **_lowerCamelCase : int , ): """simple docstring""" A_ : Optional[int] = vocab_size A_ : Tuple = d_model A_ : List[str] = decoder_ffn_dim A_ : str = decoder_layers A_ : Any = decoder_attention_heads A_ : int = dropout A_ : str = attention_dropout A_ : Optional[int] = activation_dropout A_ : str = activation_function A_ : List[Any] = init_std A_ : Union[str, Any] = decoder_layerdrop A_ : Any = use_cache A_ : Optional[Any] = decoder_layers A_ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True A_ : Optional[Any] = max_target_positions super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , decoder_start_token_id=_lowerCamelCase , **_lowerCamelCase , )
4
0
import re import string import numpy as np import datasets snake_case__ = """ Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. """ snake_case__ = """ Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 25.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 50.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 75.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results[\"exact_match\"], 1)) 100.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It\'s like comparing oranges and apples.\"] >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It\'s like comparing apples and oranges.\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 33.3 """ snake_case__ = """ """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class UpperCamelCase_ (datasets.Metric ): """simple docstring""" def _a ( self : Optional[Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , 
reference_urls=[] , ) def _a ( self : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : str=None , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Tuple=False , _lowerCamelCase : Tuple=False , ): """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: A_ : Tuple = np.array([re.sub(_a , '''''' , _a ) for x in predictions] ) A_ : Dict = np.array([re.sub(_a , '''''' , _a ) for x in references] ) else: A_ : Optional[int] = np.asarray(_a ) A_ : Optional[int] = np.asarray(_a ) if ignore_case: A_ : str = np.char.lower(_a ) A_ : Optional[Any] = np.char.lower(_a ) if ignore_punctuation: A_ : str = string.punctuation.maketrans('''''' , '''''' , string.punctuation ) A_ : Optional[Any] = np.char.translate(_a , table=_a ) A_ : Optional[Any] = np.char.translate(_a , table=_a ) if ignore_numbers: A_ : Optional[int] = string.digits.maketrans('''''' , '''''' , string.digits ) A_ : Any = np.char.translate(_a , table=_a ) A_ : Union[str, Any] = np.char.translate(_a , table=_a ) A_ : Optional[int] = predictions == references return {"exact_match": np.mean(_a ) * 100}
364
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING snake_case__ = logging.get_logger(__name__) snake_case__ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = 'table-transformer' _lowerCAmelCase = ['past_key_values'] _lowerCAmelCase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Any , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Dict=None , _lowerCamelCase : int=3 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : Any=8 , _lowerCamelCase : Dict=6 , _lowerCamelCase : Tuple=2048 , _lowerCamelCase : int=8 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : List[Any]=0.0 , _lowerCamelCase : List[Any]=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Union[str, Any]=256 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : str=0.02 , _lowerCamelCase : Tuple=1.0 , _lowerCamelCase : Dict=False , _lowerCamelCase : str="sine" , _lowerCamelCase : str="resnet50" , _lowerCamelCase : Any=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=1 , _lowerCamelCase : int=5 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : Optional[int]=1 , _lowerCamelCase : Any=1 , _lowerCamelCase : Dict=5 , _lowerCamelCase : str=2 , _lowerCamelCase : Union[str, Any]=0.1 , **_lowerCamelCase : int , ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.''' ) A_ : int = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_lowerCamelCase , _lowerCamelCase ): A_ : str = backbone_config.get('''model_type''' ) A_ : Optional[int] = CONFIG_MAPPING[backbone_model_type] A_ : List[str] = config_class.from_dict(_lowerCamelCase ) # set timm attributes to None A_ ,A_ ,A_ : Union[str, Any] = None, None, None A_ : Optional[Any] = use_timm_backbone A_ : Optional[int] = backbone_config A_ : Optional[Any] = num_channels A_ : Dict = num_queries A_ : str = d_model A_ : List[str] = encoder_ffn_dim A_ : int = encoder_layers A_ : Optional[Any] = encoder_attention_heads A_ : List[str] = decoder_ffn_dim A_ : Any = decoder_layers A_ : List[str] = decoder_attention_heads A_ : Tuple = dropout A_ : Optional[Any] = attention_dropout A_ : Any = activation_dropout A_ : List[Any] = activation_function A_ : Dict = init_std A_ : Any = init_xavier_std A_ : List[Any] = encoder_layerdrop A_ : int = decoder_layerdrop A_ : Any = encoder_layers A_ : List[str] = auxiliary_loss A_ : List[Any] = position_embedding_type A_ : Optional[Any] = backbone A_ : Tuple = use_pretrained_backbone A_ : List[Any] = dilation # Hungarian matcher A_ : List[str] = class_cost A_ : str = bbox_cost A_ : Union[str, Any] = giou_cost # Loss coefficients A_ : Any = mask_loss_coefficient A_ : Optional[int] = dice_loss_coefficient A_ : Dict = bbox_loss_coefficient A_ : int = giou_loss_coefficient A_ : int = eos_coefficient super().__init__(is_encoder_decoder=_lowerCamelCase , **_lowerCamelCase ) @property def _a ( self : List[Any] ): """simple docstring""" return self.encoder_attention_heads @property def _a ( self : Any ): """simple docstring""" return self.d_model class UpperCamelCase_ (a__ ): """simple docstring""" _lowerCAmelCase = version.parse('1.11' ) @property def _a ( self : Tuple ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def _a ( self : Optional[int] ): """simple docstring""" return 1E-5 @property def _a ( self : str ): """simple docstring""" return 12
4
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class UpperCamelCase_ (metaclass=DummyObject ): """simple docstring""" _lowerCAmelCase = ["torch", "torchsde"] def __init__( self : Optional[int] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Tuple ): """simple docstring""" requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def _a ( cls : Union[str, Any] , *_lowerCamelCase : Dict , **_lowerCamelCase : int ): """simple docstring""" requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def _a ( cls : List[str] , *_lowerCamelCase : str , **_lowerCamelCase : Union[str, Any] ): """simple docstring""" requires_backends(cls , ['''torch''', '''torchsde'''] )
365
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class UpperCamelCase_ : """simple docstring""" def __init__( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : List[str]=3 , _lowerCamelCase : Any=32 , _lowerCamelCase : Union[str, Any]=3 , _lowerCamelCase : int=10 , _lowerCamelCase : Union[str, Any]=[8, 16, 32, 64] , _lowerCamelCase : Dict=[1, 1, 2, 1] , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Any="relu" , _lowerCamelCase : Optional[Any]=3 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=["stage2", "stage3", "stage4"] , _lowerCamelCase : Union[str, Any]=[2, 3, 4] , _lowerCamelCase : Tuple=1 , ): """simple docstring""" A_ : List[str] = parent A_ : List[str] = batch_size A_ : Union[str, Any] = image_size A_ : Tuple = num_channels A_ : Any = embeddings_size A_ : int = hidden_sizes A_ : Optional[Any] = depths A_ : List[Any] = is_training A_ : Optional[int] = use_labels A_ : int = hidden_act A_ : Tuple = num_labels A_ : Union[str, Any] = scope A_ : List[Any] = len(_lowerCamelCase ) A_ : Union[str, Any] = out_features A_ : List[Any] = out_indices A_ : Dict = num_groups def _a ( self : Optional[int] ): """simple docstring""" A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Any = ids_tensor([self.batch_size] , self.num_labels ) A_ : Any = self.get_config() return config, pixel_values, labels def _a ( self : Union[str, Any] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any] ): """simple docstring""" A_ : Any = BitModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _a ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] ): """simple docstring""" A_ : Dict = self.num_labels A_ : Optional[Any] = BitForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ): """simple 
docstring""" A_ : List[Any] = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : int = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ : Optional[Any] = None A_ : int = BitBackbone(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() A_ : Optional[int] = model(_lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _a ( self : List[Any] ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ ,A_ ,A_ : Union[str, Any] = config_and_inputs A_ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (a__, a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCAmelCase = ( {'feature-extraction': BitModel, 'image-classification': BitForImageClassification} if is_torch_available() else {} ) _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False _lowerCAmelCase = False def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[str] = BitModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self : List[Any] ): """simple docstring""" return @unittest.skip(reason='''Bit does not output attentions''' ) def _a ( self : str ): """simple docstring""" pass @unittest.skip(reason='''Bit does not use inputs_embeds''' ) def _a ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip(reason='''Bit does not support input and output embeddings''' ) def _a ( self : Any ): """simple docstring""" pass def _a ( self : List[Any] ): """simple docstring""" A_ ,A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Dict = model_class(_lowerCamelCase ) A_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : int = [*signature.parameters.keys()] A_ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _a ( self : Optional[Any] ): """simple docstring""" A_ : 
Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_lowerCamelCase ) def _a ( self : Tuple ): """simple docstring""" A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : str = model_class(config=_lowerCamelCase ) for name, module in model.named_modules(): if isinstance(_lowerCamelCase , (nn.BatchNorm2d, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def _a ( self : int ): """simple docstring""" def check_hidden_states_output(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int ): A_ : Union[str, Any] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): A_ : Union[str, Any] = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) A_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : List[Any] = self.model_tester.num_stages self.assertEqual(len(_lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ ,A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ['''preactivation''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : Tuple = layer_type A_ : Optional[Any] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : List[str] = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) @unittest.skip(reason='''Bit does not use feedforward chunking''' ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : str ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) @slow def _a ( self : Union[str, Any] ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = BitModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def prepare_img( ): image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def _a ( self : List[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _a ( self : Dict ): """simple docstring""" A_ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowerCamelCase ) A_ : Union[str, Any] = self.default_image_processor A_ : Optional[int] = prepare_img() A_ : int = image_processor(images=_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): A_ : Union[str, Any] = model(**_lowerCamelCase ) # verify the logits A_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase
) A_ : Tuple = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) ) @require_torch class UpperCamelCase_ (a__, unittest.TestCase ): """simple docstring""" _lowerCAmelCase = (BitBackbone,) if is_torch_available() else () _lowerCAmelCase = BitConfig _lowerCAmelCase = False def _a ( self : List[str] ): """simple docstring""" A_ : Union[str, Any] = BitModelTester(self )
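For reference, a short usage sketch of the backbone path the tests above exercise; the checkpoint name and shapes are assumptions, not taken from the tests:

import torch
from transformers import BitBackbone

# Checkpoint name assumed; any BiT checkpoint should work here.
backbone = BitBackbone.from_pretrained("google/bit-50", out_features=["stage2", "stage4"])
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = backbone(pixel_values)
for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # (batch, channels, h, w), one entry per requested stage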
4
0
'''simple docstring''' from __future__ import annotations def solve_maze( maze : list[list[int]] ) -> bool: size = len(maze ) # We need to create solution object to save path. solutions = [[0 for _ in range(size )] for _ in range(size )] solved = run_maze(maze , 0 , 0 , solutions ) if solved: print('''\n'''.join(str(row ) for row in solutions ) ) else: print('''No solution exists!''' ) return solved def run_maze( maze : list[list[int]] , i : int , j : int , solutions : list[list[int]] ) -> bool: size = len(maze ) # Final check point. if i == j == (size - 1): solutions[i][j] = 1 return True lower_flag = (not i < 0) and (not j < 0) # Check lower bounds upper_flag = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. block_flag = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited solutions[i][j] = 1 # check for directions if ( run_maze(maze , i + 1 , j , solutions ) or run_maze(maze , i , j + 1 , solutions ) or run_maze(maze , i - 1 , j , solutions ) or run_maze(maze , i , j - 1 , solutions ) ): return True solutions[i][j] = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
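A small driver for the backtracking solver above; the 3x3 grid is illustrative (0 = free, 1 = blocked):

maze = [
    [0, 1, 0],
    [0, 0, 0],
    [1, 1, 0],
]
solve_maze(maze)  # prints the marked 0/1 path grid and returns True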
366
'''simple docstring''' import pprint import requests API_ENDPOINT_URL = """https://zenquotes.io/api""" def quote_of_the_day() -> list: return requests.get(API_ENDPOINT_URL + '''/today''' ).json() def random_quotes() -> list: return requests.get(API_ENDPOINT_URL + '''/random''' ).json() if __name__ == "__main__": response = random_quotes() pprint.pprint(response)
4
0
'''simple docstring''' from __future__ import annotations Path = list[tuple[int, int]] grid = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] delta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class Node: """simple docstring""" def __init__( self , pos_x : int , pos_y : int , goal_x : int , goal_y : int , g_cost : float , parent : Node | None , ): """simple docstring""" self.pos_x = pos_x self.pos_y = pos_y self.pos = (pos_y, pos_x) self.goal_x = goal_x self.goal_y = goal_y self.g_cost = g_cost self.parent = parent self.f_cost = self.calculate_heuristic() def calculate_heuristic( self ) -> float: """simple docstring""" dx = abs(self.pos_x - self.goal_x ) dy = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , other ) -> bool: """simple docstring""" return self.f_cost < other.f_cost class GreedyBestFirst: """simple docstring""" def __init__( self , start : tuple[int, int] , goal : tuple[int, int] ): """simple docstring""" self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None ) self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , None ) self.open_nodes = [self.start] self.closed_nodes = [] self.reached = False def search( self ) -> Path | None: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() current_node = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: self.reached = True return self.retrace_path(current_node ) self.closed_nodes.append(current_node ) successors = self.get_successors(current_node ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(child_node ) else: # retrieve the best current path better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(child_node ) else: self.open_nodes.append(better_node ) if not self.reached: return [self.start.pos] return None def get_successors( self , parent : Node ) -> list[Node]: """simple docstring""" successors = [] for action in delta: pos_x = parent.pos_x + action[1] pos_y = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) ) return successors def retrace_path( self , node : Node | None ) -> Path: """simple docstring""" current_node = node path = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) current_node = current_node.parent path.reverse() return path if __name__ == "__main__": init = (0, 0) goal = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print("""------""") greedy_bf = GreedyBestFirst(init, goal) path = greedy_bf.search() if path: for pos_x, pos_y in path: grid[pos_x][pos_y] = 2 for elem in grid: print(elem)
367
'''simple docstring''' from __future__ import annotations class IIRFilter: """simple docstring""" def __init__( self , order : int ): """simple docstring""" self.order = order # a_{0} ... a_{k} self.a_coeffs = [1.0] + [0.0] * order # b_{0} ... b_{k} self.b_coeffs = [1.0] + [0.0] * order # x[n-1] ... x[n-k] self.input_history = [0.0] * self.order # y[n-1] ... y[n-k] self.output_history = [0.0] * self.order def set_coefficients( self , a_coeffs : list[float] , b_coeffs : list[float] ): """simple docstring""" if len(a_coeffs ) < self.order: a_coeffs = [1.0, *a_coeffs] if len(a_coeffs ) != self.order + 1: error_msg = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(a_coeffs )}' ) raise ValueError(error_msg ) if len(b_coeffs ) != self.order + 1: error_msg = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(b_coeffs )}' ) raise ValueError(error_msg ) self.a_coeffs = a_coeffs self.b_coeffs = b_coeffs def process( self , sample : float ) -> float: """simple docstring""" result = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] self.input_history[1:] = self.input_history[:-1] self.output_history[1:] = self.output_history[:-1] self.input_history[0] = sample self.output_history[0] = result return result
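The `process` method above evaluates the standard IIR difference equation y[n] = (b_0 x[n] + sum_i (b_i x[n-i] - a_i y[n-i])) / a_0. A quick first-order sketch with illustrative coefficients:

# Illustrative: a first-order two-tap averager, y[n] = 0.5*x[n] + 0.5*x[n-1].
filt = IIRFilter(1)
filt.set_coefficients([1.0, 0.0], [0.5, 0.5])  # a = [1, 0], b = [0.5, 0.5]
print([filt.process(x) for x in [0.0, 1.0, 1.0, 1.0]])  # [0.0, 0.5, 1.0, 1.0]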
4
0
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def test_command_parser( subparsers=None ): if subparsers is not None: parser = subparsers.add_parser('''test''' ) else: parser = argparse.ArgumentParser('''Accelerate test command''' ) parser.add_argument( '''--config_file''' , default=None , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=test_command ) return parser def test_command( args ): script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] ) if args.config_file is None: test_args = script_name else: test_args = f'--config_file={args.config_file} {script_name}' cmd = ['''accelerate-launch'''] + test_args.split() result = execute_subprocess_async(cmd , env=os.environ.copy() ) if result.returncode == 0: print('''Test is a success! You are ready for your distributed training!''' ) def main(): parser = test_command_parser() args = parser.parse_args() test_command(args ) if __name__ == "__main__": main()
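This module backs the `accelerate test` subcommand; invoking it programmatically is equivalent to the CLI, as in the sketch below (the config path is illustrative):

# Equivalent to running: accelerate test --config_file default_config.yaml
parser = test_command_parser()
args = parser.parse_args(["--config_file", "default_config.yaml"])
test_command(args)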
368
'''simple docstring''' class Node: """simple docstring""" def __init__( self , val ): """simple docstring""" self.val = val self.left = None self.right = None def insert( self , val ): """simple docstring""" if self.val: if val < self.val: if self.left is None: self.left = Node(val ) else: self.left.insert(val ) elif val > self.val: if self.right is None: self.right = Node(val ) else: self.right.insert(val ) else: self.val = val def inorder( root , res ): # Recursive traversal if root: inorder(root.left , res ) res.append(root.val ) inorder(root.right , res ) def tree_sort( arr ): # Build BST if len(arr ) == 0: return arr root = Node(arr[0] ) for i in range(1 , len(arr ) ): root.insert(arr[i] ) # Traverse BST in order. res = [] inorder(root , res ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
0
'''simple docstring''' def solution( n : int = 4_0_0_0_0_0_0 ) -> int: fib = [0, 1] i = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 total = 0 for j in range(len(fib ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(F'{solution() = }')
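Quick checks of the summation above (the last value is the well-known Project Euler #2 answer):

assert solution(10) == 10        # 2 + 8
assert solution(100) == 44       # 2 + 8 + 34
assert solution() == 4_613_732   # even Fibonacci terms up to 4,000,000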
369
'''simple docstring''' def heaps( arr : list ) -> list: if len(arr ) <= 1: return [tuple(arr )] res = [] def generate( k : int , arr : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , arr ) for i in range(k - 1 ): if k % 2 == 0: # k is even arr[i], arr[k - 1] = arr[k - 1], arr[i] else: # k is odd arr[0], arr[k - 1] = arr[k - 1], arr[0] generate(k - 1 , arr ) generate(len(arr ) , arr ) return res if __name__ == "__main__": user_input = input("""Enter numbers separated by a comma:\n""").strip() arr = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
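Heap's algorithm fixes the last position and recursively permutes the first k - 1 elements, choosing the swap index by the parity of k, so `heaps` yields all n! orderings. For example:

print(heaps([1, 2, 3]))
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]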
4
0