Dataset schema:
- code: string, 86 to 54.5k characters
- code_codestyle: int64, 0 to 371
- style_context: string, 87 to 49.2k characters
- style_context_codestyle: int64, 0 to 349
- label: int64, 0 or 1

Each example row below lists these five fields in order: a code sample, its code-style id, a style-context sample, the context's code-style id, and the binary label.
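As a minimal sketch of how rows with this schema could be inspected with the Hugging Face datasets library (the dataset id below is a hypothetical placeholder, since this dump does not name the dataset):

    from datasets import load_dataset

    # "user/code-style-pairs" is a hypothetical placeholder; substitute the real dataset id.
    ds = load_dataset("user/code-style-pairs", split="train")

    row = ds[0]
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:200])  # first 200 characters of the flattened source file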
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __a :int = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : int = PegasusTokenizer _lowerCamelCase : Dict = PegasusTokenizerFast _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : str = True def __A ( self : Dict ): super().setUp() # We have a SentencePiece fixture for testing A_ = PegasusTokenizer(UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __A ( self : str ): return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def __A ( self : List[Any] , **UpperCAmelCase : Tuple ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : Dict ): return ("This is a test", "This is a test") def __A ( self : Optional[int] ): A_ = "</s>" A_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase ) def __A ( self : Dict ): A_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) self.assertEqual(len(UpperCAmelCase ) , 1103 ) def __A ( self : Union[str, Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __A ( self : Dict ): A_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) A_ = self.tokenizer_class.from_pretrained(self.tmpdirname ) A_ = ( "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important" " </s> <pad> <pad> <pad>" ) A_ = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase , add_special_tokens=UpperCAmelCase ).input_ids[0] A_ = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase , add_special_tokens=UpperCAmelCase ).input_ids[0] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Dict ): A_ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word A_ = "<mask_1> To ensure a <mask_2> flow of bank resolutions." A_ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] A_ = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase ).input_ids[0] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Union[str, Any] ): A_ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 A_ = "To ensure a smooth flow of bank resolutions." 
A_ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] A_ = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase ).input_ids[0] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __A ( self : str ): A_ = ["This is going to be way too long." * 150, "short example"] A_ = ["not super long but more than 5 tokens", "tiny"] A_ = self._large_tokenizer(UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" ) A_ = self._large_tokenizer( text_target=UpperCAmelCase , max_length=5 , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __A ( self : Optional[Any] ): # fmt: off A_ = {"input_ids": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , ) @require_sentencepiece @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Union[str, Any] = PegasusTokenizer _lowerCamelCase : int = PegasusTokenizerFast _lowerCamelCase : Tuple = True _lowerCamelCase : int = True def __A ( self : Dict ): super().setUp() # We have a SentencePiece fixture for testing A_ = PegasusTokenizer(UpperCAmelCase , offset=0 , mask_token_sent=UpperCAmelCase , mask_token="[MASK]" ) 
tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __A ( self : Optional[int] ): return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def __A ( self : Union[str, Any] , **UpperCAmelCase : Optional[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : List[str] , UpperCAmelCase : List[str] ): return ("This is a test", "This is a test") def __A ( self : Optional[Any] ): A_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) A_ = self.tokenizer_class.from_pretrained(self.tmpdirname ) A_ = ( "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>" " <pad> <pad> <pad>" ) A_ = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase , add_special_tokens=UpperCAmelCase ).input_ids[0] A_ = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase , add_special_tokens=UpperCAmelCase ).input_ids[0] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @require_torch def __A ( self : List[Any] ): A_ = ["This is going to be way too long." * 1000, "short example"] A_ = ["not super long but more than 5 tokens", "tiny"] A_ = self._large_tokenizer(UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" ) A_ = self._large_tokenizer( text_target=UpperCAmelCase , max_length=5 , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(UpperCAmelCase ) == 2 # input_ids, attention_mask. def __A ( self : Union[str, Any] ): A_ = ( "This is an example string that is used to test the original TF implementation against the HF" " implementation" ) A_ = self._large_tokenizer(UpperCAmelCase ).input_ids self.assertListEqual( UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
code_codestyle: 329
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours when `taken` balls are drawn from NUM_BALLS."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
style_context_codestyle: 329
label: 1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
code_codestyle: 329
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the requested options
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
style_context_codestyle: 329
label: 1
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='relu'))
    classifier.add(layers.Dense(units=1, activation='sigmoid'))

    # Compiling the CNN
    classifier.compile(
        optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )

    test_set = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save('cnn.h5')

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = 'Normal'
    if result[0][0] == 1:
        prediction = 'Abnormality detected'
code_codestyle: 329
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
style_context_codestyle: 329
label: 1
import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class _a : """simple docstring""" def __A ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple ): return None class _a : """simple docstring""" def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] ): return None class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : Dict = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def __A ( self : Union[str, Any] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(UpperCAmelCase , "tf" , 12 , **UpperCAmelCase ) @require_torch @slow def __A ( self : int ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(UpperCAmelCase , "pt" , 12 , **UpperCAmelCase ) @require_torch @slow def __A ( self : Optional[int] ): from transformers import BertModel A_ = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"] with NamedTemporaryFile(mode="w+t" ) as vocab_file: vocab_file.write("\n".join(UpperCAmelCase ) ) vocab_file.flush() A_ = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: A_ = BertModel(BertConfig(vocab_size=len(UpperCAmelCase ) ) ) model.save_pretrained(UpperCAmelCase ) self._test_export(UpperCAmelCase , "pt" , 12 , UpperCAmelCase ) @require_tf @slow def __A ( self : int ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: A_ = self._test_export(UpperCAmelCase , "tf" , 12 , **UpperCAmelCase ) A_ = quantize(Path(UpperCAmelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(UpperCAmelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) @require_torch @slow def __A ( self : Optional[int] ): for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: A_ = self._test_export(UpperCAmelCase , "pt" , 12 , **UpperCAmelCase ) A_ = quantize(UpperCAmelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(UpperCAmelCase ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) def __A ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any=None , **UpperCAmelCase : List[str] ): try: # Compute path with TemporaryDirectory() as tempdir: A_ = Path(UpperCAmelCase ).joinpath("model.onnx" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) return path except Exception as e: self.fail(UpperCAmelCase ) @require_torch @require_tokenizers @slow def __A ( self : Tuple ): from transformers import BertModel A_ = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) A_ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(UpperCAmelCase , 
UpperCAmelCase , "pt" ) @require_tf @require_tokenizers @slow def __A ( self : Optional[Any] ): from transformers import TFBertModel A_ = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) A_ = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(UpperCAmelCase , UpperCAmelCase , "tf" ) def __A ( self : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int ): A_ = FeatureExtractionPipeline(UpperCAmelCase , UpperCAmelCase ) A_ = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"] A_ , A_ , A_ , A_ = infer_shapes(UpperCAmelCase , UpperCAmelCase ) # Assert all variables are present self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , UpperCAmelCase ) self.assertSequenceEqual(variable_names[3:] , UpperCAmelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} ) self.assertDictEqual(shapes["output_1"] , {0: "batch"} ) def __A ( self : Optional[int] ): A_ = ["input_ids", "attention_mask", "token_type_ids"] A_ = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]} A_ , A_ = ensure_valid_input(FuncContiguousArgs() , UpperCAmelCase , UpperCAmelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(UpperCAmelCase ) , 3 ) # Should have exactly the same input names self.assertEqual(set(UpperCAmelCase ) , set(UpperCAmelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(UpperCAmelCase , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) A_ , A_ = ensure_valid_input(FuncNonContiguousArgs() , UpperCAmelCase , UpperCAmelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(UpperCAmelCase ) , 1 ) self.assertEqual(len(UpperCAmelCase ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["input_ids"] ) self.assertEqual(ordered_input_names[0] , "input_ids" ) def __A ( self : Union[str, Any] ): A_ = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" ) self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
code_codestyle: 329
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _a : """simple docstring""" @staticmethod def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ): pass @is_pipeline_test @require_vision class _a ( unittest.TestCase ): """simple docstring""" @require_torch def __A ( self : List[str] ): A_ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(UpperCAmelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @require_tf def __A ( self : int ): A_ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @slow @require_torch def __A ( self : Any ): A_ = pipeline( 
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def __A ( self : Optional[Any] ): A_ = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
style_context_codestyle: 329
label: 1
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Sieve of Eratosthenes over the numbers below NUM_PRIMES
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the products encoding each way to write the number as a sum of primes."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Smallest number with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 329
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ): """simple docstring""" A_ = [] for _ in range(__UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ): """simple docstring""" A_ = [] for step in range(__UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A_ = os.path.join(__UpperCamelCase ,"schedule.bin" ) torch.save(scheduler.state_dict() ,__UpperCamelCase ) A_ = torch.load(__UpperCamelCase ) scheduler.load_state_dict(__UpperCamelCase ) return lrs @require_torch class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ): self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ): self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase ) A_ = torch.tensor([0.4, 0.2, -0.5] ) A_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): A_ = criterion(UpperCAmelCase , UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def __A ( self : Dict ): A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase ) A_ = torch.tensor([0.4, 0.2, -0.5] ) A_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A_ = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , ) for _ in range(1000 ): A_ = criterion(UpperCAmelCase , UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None _lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _lowerCamelCase : Any = 1_0 def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ): self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ): self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A_ = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A_ , A_ = data A_ = scheduler_func(self.optimizer , **UpperCAmelCase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A_ = unwrap_schedule(UpperCAmelCase , self.num_steps ) self.assertListAlmostEqual( UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) A_ = scheduler_func(self.optimizer , **UpperCAmelCase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' ) class _a : """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : List[str] ): A_ = fn def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ): return self.fn(*UpperCAmelCase , **UpperCAmelCase ) @classmethod def __A ( self : Dict , UpperCAmelCase : List[str] ): A_ = list(map(self , scheduler.lr_lambdas ) )
style_context_codestyle: 329
label: 1
def bead_sort(sequence: list) -> list:
    """Bead sort (gravity sort) for lists of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
code_codestyle: 329
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing """simple docstring""" return x.sum() def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing """simple docstring""" return i + 1 @dataclass class _a : """simple docstring""" _lowerCamelCase : int _lowerCamelCase : str class _a ( snake_case_ ): """simple docstring""" def __A ( self : Dict ): A_ = {} A_ = [] A_ = 1 A_ = [1, 2] A_ = {"a": 1, "b": 2} A_ = {"a": [1, 2], "b": [3, 4]} A_ = {"a": {"1": 1}, "b": 2} A_ = {"a": 1, "b": 2, "c": 3, "d": 4} A_ = {} A_ = [] A_ = 2 A_ = [2, 3] A_ = {"a": 2, "b": 3} A_ = {"a": [2, 3], "b": [4, 5]} A_ = {"a": {"1": 2}, "b": 3} A_ = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) A_ = 2 self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} A_ = {"a": 2, "b": 0, "c": 2} A_ = { "a": np.eye(2 ).astype(UpperCAmelCase ), "b": np.zeros(3 ).astype(UpperCAmelCase ), "c": np.ones(2 ).astype(UpperCAmelCase ), } self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(UpperCAmelCase ): # can't pickle 
a local lambda map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase ) def __A ( self : List[str] ): A_ = {"a": 1, "b": 2} A_ = {"a": 3, "b": 4} A_ = {"a": 5, "b": 6} A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase ) def __A ( self : Any ): class _a : """simple docstring""" _lowerCamelCase : int = 'bar' A_ = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" ,[ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] ,) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ): """simple docstring""" with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: A_ = {f'''{i}''': i for i in range(__UpperCamelCase )} A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class _a ( snake_case_ ): """simple docstring""" @require_tf def __A ( self : Union[str, Any] ): import tensorflow as tf from tensorflow.keras import layers A_ = layers.Dense(2 ) def gen_random_output(): A_ = tf.random.uniform((1, 3) ) return model(UpperCAmelCase ).numpy() with temp_seed(42 , set_tensorflow=UpperCAmelCase ): A_ = gen_random_output() with temp_seed(42 , set_tensorflow=UpperCAmelCase ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __A ( self : Optional[int] ): import torch def gen_random_output(): A_ = torch.nn.Linear(3 , 2 ) A_ = torch.rand(1 , 3 ) return model(UpperCAmelCase ).detach().numpy() with temp_seed(42 , set_pytorch=UpperCAmelCase ): A_ = gen_random_output() with temp_seed(42 , set_pytorch=UpperCAmelCase ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __A ( self : Any ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): A_ = gen_random_output() with temp_seed(42 ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data" ,[{}] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = NestedDataStructure(__UpperCamelCase ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" ,[ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], 
[4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] ,) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ): """simple docstring""" A_ = NestedDataStructure(__UpperCamelCase ).flatten() assert output == expected_output def __snake_case ( ): """simple docstring""" A_ = A(x=1 ,y="foobar" ) A_ = {"x": 1, "y": "foobar"} assert asdict(__UpperCamelCase ) == expected_output A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]} A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(__UpperCamelCase ) == expected_output with pytest.raises(__UpperCamelCase ): asdict([1, A(x=10 ,y="foo" )] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" return text.split() def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def __snake_case ( ): """simple docstring""" with Pool(2 ) as pool: A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(__UpperCamelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(__UpperCamelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: A_ = [] for yield_time, content in iflatmap_unordered( __UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(__UpperCamelCase ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(__UpperCamelCase ) == 4
style_context_codestyle: 329
label: 1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a :Optional[int] = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: A_ = 192 A_ = 768 A_ = 12 A_ = 3 A_ = [800, 1333] A_ = False elif yolos_name == "yolos_s_dWr": A_ = 330 A_ = 14 A_ = 6 A_ = 1320 elif "yolos_s" in yolos_name: A_ = 384 A_ = 1536 A_ = 12 A_ = 6 elif "yolos_b" in yolos_name: A_ = [800, 1344] A_ = 91 A_ = "huggingface/label-files" A_ = "coco-detection-id2label.json" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} return config def __snake_case ( __UpperCamelCase : dict ,__UpperCamelCase : YolosConfig ,__UpperCamelCase : bool = False ): """simple docstring""" for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) A_ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A_ = in_proj_weight[: config.hidden_size, :] A_ = in_proj_bias[: config.hidden_size] A_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ = in_proj_weight[-config.hidden_size :, :] A_ = in_proj_bias[-config.hidden_size :] def __snake_case ( __UpperCamelCase : str ): """simple docstring""" if "backbone" in name: A_ = name.replace("backbone" ,"vit" ) if "cls_token" in name: A_ = name.replace("cls_token" ,"embeddings.cls_token" ) if "det_token" in name: A_ = name.replace("det_token" ,"embeddings.detection_tokens" ) if "mid_pos_embed" in name: A_ = name.replace("mid_pos_embed" ,"encoder.mid_position_embeddings" ) if "pos_embed" in name: A_ = name.replace("pos_embed" ,"embeddings.position_embeddings" ) if "patch_embed.proj" in name: A_ = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection" ) if "blocks" in name: A_ = name.replace("blocks" ,"encoder.layer" ) if "attn.proj" in name: A_ = name.replace("attn.proj" ,"attention.output.dense" ) if "attn" in name: A_ = name.replace("attn" ,"attention.self" ) if "norm1" in name: A_ = name.replace("norm1" ,"layernorm_before" ) if "norm2" in name: A_ = name.replace("norm2" ,"layernorm_after" ) if "mlp.fc1" in name: A_ = name.replace("mlp.fc1" ,"intermediate.dense" ) if "mlp.fc2" in name: A_ = name.replace("mlp.fc2" ,"output.dense" ) if "class_embed" in name: A_ = name.replace("class_embed" ,"class_labels_classifier" ) if "bbox_embed" in name: A_ = name.replace("bbox_embed" ,"bbox_predictor" ) if "vit.norm" in name: A_ = name.replace("vit.norm" ,"vit.layernorm" ) return name def __snake_case ( __UpperCamelCase : dict ,__UpperCamelCase : YolosForObjectDetection ): """simple docstring""" for key in orig_state_dict.copy().keys(): A_ = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: A_ = key.split("." 
) A_ = int(key_split[2] ) A_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: A_ = val[:dim, :] A_ = val[ dim : dim * 2, : ] A_ = val[-dim:, :] else: A_ = val[:dim] A_ = val[dim : dim * 2] A_ = val[-dim:] else: A_ = val return orig_state_dict def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : bool = False ): """simple docstring""" A_ = get_yolos_config(__UpperCamelCase ) # load original state_dict A_ = torch.load(__UpperCamelCase ,map_location="cpu" )["model"] # load 🤗 model A_ = YolosForObjectDetection(__UpperCamelCase ) model.eval() A_ = convert_state_dict(__UpperCamelCase ,__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by YolosImageProcessor A_ = 800 if yolos_name != "yolos_ti" else 512 A_ = YolosImageProcessor(format="coco_detection" ,size=__UpperCamelCase ) A_ = image_processor(images=prepare_img() ,return_tensors="pt" ) A_ = model(**__UpperCamelCase ) A_ , A_ = outputs.logits, outputs.pred_boxes A_ , A_ = None, None if yolos_name == "yolos_ti": A_ = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) A_ = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": A_ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) A_ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": A_ = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) A_ = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": A_ = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) A_ = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": A_ = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) A_ = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] ,__UpperCamelCase ,atol=1E-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if push_to_hub: A_ = { "yolos_ti": "yolos-tiny", "yolos_s_200_pre": "yolos-small", "yolos_s_300_pre": "yolos-small-300", "yolos_s_dWr": "yolos-small-dwr", "yolos_base": "yolos-base", } print("Pushing to the hub..." 
) A_ = model_mapping[yolos_name] image_processor.push_to_hub(__UpperCamelCase ,organization="hustvl" ) model.push_to_hub(__UpperCamelCase ,organization="hustvl" ) if __name__ == "__main__": __a :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __a :str = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
code_codestyle: 329
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def __snake_case ( __UpperCamelCase : str ): """simple docstring""" for char in word: A_ = ord(__UpperCamelCase ) if not _is_chinese_char(__UpperCamelCase ): return 0 return 1 def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ = set() for token in tokens: A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase ) if chinese_word: word_set.add(__UpperCamelCase ) A_ = list(__UpperCamelCase ) return word_list def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] ) A_ = bert_tokens A_ , A_ = 0, len(__UpperCamelCase ) while start < end: A_ = True if is_chinese(bert_word[start] ): A_ = min(end - start ,__UpperCamelCase ) for i in range(__UpperCamelCase ,1 ,-1 ): A_ = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 ,start + i ): A_ = "##" + bert_word[j] A_ = start + i A_ = False break if single_word: start += 1 return bert_word def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ): """simple docstring""" A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0] A_ = [get_chinese_word(__UpperCamelCase ) for r in res] ltp_res.extend(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ): A_ = [] for id in input_ids: A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase ) input_tokens.append(__UpperCamelCase ) A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase ) A_ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__UpperCamelCase ): if token[:2] == "##": A_ = token[2:] # save chinese tokens' pos if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ): ref_id.append(__UpperCamelCase ) ref_ids.append(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) return ref_ids def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" with open(args.file_name ,"r" ,encoding="utf-8" ) as f: A_ = f.readlines() A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ = LTP(args.ltp ) # faster in GPU device A_ = BertTokenizer.from_pretrained(args.bert ) A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) with open(args.save_path ,"w" ,encoding="utf-8" ) as f: A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids] f.writelines(__UpperCamelCase ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path' ) parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer') parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res') __a :Dict = parser.parse_args() main(args)
329
1
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _a : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int]=13 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : Dict=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : str=[10, 20, 30, 40] , UpperCAmelCase : str=[2, 2, 3, 2] , UpperCAmelCase : Dict=True , UpperCAmelCase : str=True , UpperCAmelCase : str=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Dict=10 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Any=["stage2", "stage3", "stage4"] , UpperCAmelCase : Optional[Any]=[2, 3, 4] , UpperCAmelCase : List[str]=None , ): A_ = parent A_ = batch_size A_ = image_size A_ = num_channels A_ = num_stages A_ = hidden_sizes A_ = depths A_ = is_training A_ = use_labels A_ = intermediate_size A_ = hidden_act A_ = num_labels A_ = initializer_range A_ = out_features A_ = out_indices A_ = scope def __A ( self : Optional[int] ): A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.num_labels ) A_ = self.get_config() return config, pixel_values, labels def __A ( self : Any ): return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def __A ( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] ): A_ = ConvNextVaModel(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __A ( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict ): A_ = ConvNextVaForImageClassification(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase , labels=UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] ): A_ = ConvNextVaBackbone(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) 
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None A_ = None A_ = ConvNextVaBackbone(config=UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() A_ = model(UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __A ( self : Union[str, Any] ): A_ = self.prepare_config_and_inputs() A_ , A_ , A_ = config_and_inputs A_ = {"pixel_values": pixel_values} return config, inputs_dict def __A ( self : int ): A_ = self.prepare_config_and_inputs() A_ , A_ , A_ = config_and_inputs A_ = {"pixel_values": pixel_values, "labels": labels} return config, inputs_dict @require_torch class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Dict = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) _lowerCamelCase : str = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : int = False _lowerCamelCase : List[Any] = False _lowerCamelCase : str = False _lowerCamelCase : Tuple = False def __A ( self : Dict ): A_ = ConvNextVaModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 ) def __A ( self : Tuple ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __A ( self : int ): return @unittest.skip(reason="ConvNextV2 does not use inputs_embeds" ) def __A ( self : Union[str, Any] ): pass @unittest.skip(reason="ConvNextV2 does not support input and output embeddings" ) def __A ( self : Optional[Any] ): pass @unittest.skip(reason="ConvNextV2 does not use feedforward chunking" ) def __A ( self : Tuple ): pass def __A ( self : int ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: A_ , A_ = self.model_tester.prepare_config_and_inputs_with_labels() A_ = True if model_class.__name__ in [ *get_values(UpperCAmelCase ), *get_values(UpperCAmelCase ), ]: continue A_ = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.train() A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) A_ = model(**UpperCAmelCase ).loss loss.backward() def __A ( self : Optional[Any] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: A_ , A_ = self.model_tester.prepare_config_and_inputs_with_labels() A_ = False A_ = True if ( model_class.__name__ in [*get_values(UpperCAmelCase ), *get_values(UpperCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue A_ = model_class(UpperCAmelCase ) 
model.to(UpperCAmelCase ) model.gradient_checkpointing_enable() model.train() A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase ) A_ = model(**UpperCAmelCase ).loss loss.backward() def __A ( self : Union[str, Any] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ = model_class(UpperCAmelCase ) A_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ = [*signature.parameters.keys()] A_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def __A ( self : str ): def check_hidden_states_output(UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[int] ): A_ = model_class(UpperCAmelCase ) model.to(UpperCAmelCase ) model.eval() with torch.no_grad(): A_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) A_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ = self.model_tester.num_stages self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ = True check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase ) @slow def __A ( self : List[Any] ): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = ConvNextVaModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def __snake_case ( ): """simple docstring""" A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class _a ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self : int ): return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None @slow def __A ( self : Optional[Any] ): A_ = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(UpperCAmelCase ) A_ = self.default_image_processor A_ = prepare_img() A_ = preprocessor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase ) # forward pass with torch.no_grad(): A_ = model(**UpperCAmelCase ) # verify the logits A_ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCAmelCase ) A_ = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1E-4 ) )
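Outside the test harness, the checkpoint used by the integration test can be run directly. A minimal inference sketch, assuming network access to the facebook/convnextv2-tiny-1k-224 repo (the class is mangled to ConvNextVaForImageClassification above; the released name is ConvNextV2ForImageClassification) and an arbitrary RGB image on disk:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")

image = Image.open("cats.jpg")  # any RGB image; the path is illustrative
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet-1k head
print(model.config.id2label[logits.argmax(-1).item()])
```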
329
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def __snake_case ( __UpperCamelCase : Features ): """simple docstring""" A_ = np.inf def set_batch_size(__UpperCamelCase : FeatureType ) -> None: nonlocal batch_size if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary": A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__UpperCamelCase ,__UpperCamelCase ) return None if batch_size is np.inf else batch_size class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ): super().__init__( UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , ) A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths} A_ = _PACKAGED_DATASETS_MODULES["parquet"][1] A_ = Parquet( cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , ) def __A ( self : Optional[Any] ): # Build iterable dataset if self.streaming: A_ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A_ = None A_ = None A_ = None A_ = None self.builder.download_and_prepare( download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , ) A_ = self.builder.as_dataset( split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ): A_ = dataset A_ = path_or_buf A_ = batch_size or get_writer_batch_size(dataset.features ) A_ = parquet_writer_kwargs def __A ( self : int ): A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , "wb+" ) as buffer: A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs ) else: A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs ) return written def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ): A_ = 0 A_ = parquet_writer_kwargs.pop("path_or_buf" , 
UpperCAmelCase ) A_ = self.dataset.features.arrow_schema A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ): A_ = query_table( table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(UpperCAmelCase ) written += batch.nbytes writer.close() return written
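These reader/writer classes back the public `Dataset.to_parquet` / `Dataset.from_parquet` helpers in `datasets`. A minimal round-trip sketch using those entry points (file name illustrative):

```python
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
ds.to_parquet("toy.parquet")                    # ParquetDatasetWriter under the hood

reloaded = Dataset.from_parquet("toy.parquet")  # ParquetDatasetReader under the hood
assert reloaded.column_names == ds.column_names
assert reloaded[0] == ds[0]
```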
329
1
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
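Migrating callers is a one-line change: construct the image processor directly instead of the deprecated shim. A sketch (the call is left commented because it assumes a list of PIL frames as input):

```python
from transformers import VideoMAEImageProcessor

# Old (deprecated, emits the FutureWarning above):
#   feature_extractor = VideoMAEFeatureExtractor()
# New (same preprocessing behaviour):
processor = VideoMAEImageProcessor()
# inputs = processor(video, return_tensors="pt")  # `video`: a list of PIL frames
```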
329
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 270 degrees counterclockwise (90 degrees clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
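As a quick sanity check on the rotation identities, a standalone snippet (re-defining the 90-degree rotation inline so it runs on its own) verifies that four quarter turns reproduce the original matrix:

```python
def rotate_ccw(matrix):
    # 90 degrees counterclockwise: transpose, then reverse the rows
    return [list(row) for row in zip(*matrix)][::-1]


m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
r = m
for _ in range(4):
    r = rotate_ccw(r)
assert r == m  # four quarter turns compose to the identity
```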
329
1
from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = ['image_processor', 'tokenizer'] _lowerCamelCase : Optional[int] = 'BlipImageProcessor' _lowerCamelCase : List[str] = ('BertTokenizer', 'BertTokenizerFast') def __init__( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ): A_ = False super().__init__(UpperCAmelCase , UpperCAmelCase ) A_ = self.image_processor def __call__( self : int , UpperCAmelCase : ImageInput = None , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Optional[Any] , ): if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None: A_ = self.tokenizer A_ = self.tokenizer( text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) return text_encoding # add pixel_values A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase ) if text is not None: A_ = self.tokenizer( text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , ) else: A_ = None if text_encoding is not None: encoding_image_processor.update(UpperCAmelCase ) return encoding_image_processor def __A ( self : int , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ): return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : int , *UpperCAmelCase : List[str] , **UpperCAmelCase : str ): return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase ) @property def __A ( self : List[Any] ): A_ = self.tokenizer.model_input_names A_ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
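In practice the processor is loaded from a pretrained BLIP repo so the tokenizer and image processor stay in sync. A minimal sketch, assuming the Salesforce/blip-image-captioning-base checkpoint and an image on disk:

```python
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")

image = Image.open("demo.jpg")  # illustrative path
inputs = processor(images=image, text="a photography of", return_tensors="pt")
# pixel_values comes from the image processor; input_ids/attention_mask from the tokenizer
print(sorted(inputs.keys()))
```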
329
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Any = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ): 
requires_backends(cls , ["torch", "transformers", "onnx"] )
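All of these placeholders follow one pattern: every constructor and classmethod immediately calls requires_backends, which raises an ImportError naming the missing packages. A minimal self-contained sketch of the pattern (the helper is simplified here; the real requires_backends first checks whether each backend is importable, and the class name is illustrative):

```python
def requires_backends(obj, backends):
    # Simplified: assume none of the backends are installed and always raise.
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class SomeOnnxPipeline:  # stand-in for the dummy classes above
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])


try:
    SomeOnnxPipeline()
except ImportError as err:
    print(err)  # SomeOnnxPipeline requires the following backends: torch, transformers, onnx
```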
329
1
from heapq import heappop, heappush import numpy as np def __snake_case ( __UpperCamelCase : np.ndarray ,__UpperCamelCase : tuple[int, int] ,__UpperCamelCase : tuple[int, int] ,__UpperCamelCase : bool ,): """simple docstring""" A_ , A_ = grid.shape A_ = [-1, 1, 0, 0] A_ = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] A_ , A_ = [(0, source)], set() A_ = np.full((rows, cols) ,np.inf ) A_ = 0 A_ = np.empty((rows, cols) ,dtype=__UpperCamelCase ) A_ = None while queue: ((A_) , (A_)) = heappop(__UpperCamelCase ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: A_ = [] while (x, y) != source: path.append((x, y) ) A_ , A_ = predecessors[x, y] path.append(__UpperCamelCase ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(__UpperCamelCase ) ): A_ , A_ = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: A_ = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(__UpperCamelCase ,(dist + 1, (nx, ny)) ) A_ = dist + 1 A_ = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
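The automatic renaming collapses most identifiers into A_ / __UpperCamelCase, which makes the routine hard to follow. A cleaned-up, self-contained sketch of the same unit-cost Dijkstra (effectively BFS, since every step costs 1) over a 0/1 grid, with identifiers chosen for readability rather than taken from the file:

```python
from heapq import heappop, heappush

import numpy as np


def grid_shortest_path(grid, source, destination, allow_diagonal=False):
    """Shortest path on a 0/1 grid (1 = passable); returns (distance, path)."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0] + ([-1, -1, 1, 1] if allow_diagonal else [])
    dy = [0, 0, -1, 1] + ([-1, 1, -1, 1] if allow_diagonal else [])

    queue, visited = [(0, source)], set()
    dist = np.full((rows, cols), np.inf)
    dist[source] = 0
    parent = {}

    while queue:
        d, (x, y) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = [(x, y)]
            while path[-1] != source:          # walk predecessors back to the source
                path.append(parent[path[-1]])
            return d, path[::-1]
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols and grid[nx, ny] == 1 and dist[nx, ny] > d + 1:
                dist[nx, ny] = d + 1
                parent[(nx, ny)] = (x, y)
                heappush(queue, (d + 1, (nx, ny)))
    return np.inf, []


grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
print(grid_shortest_path(grid, (0, 0), (2, 0)))
# (6, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)])
```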
329
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
329
1
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices i < j with nums[i] + nums[j] == target, or [] if none.

    Assumes ``nums`` is sorted in ascending order; the two pointers move
    inward from both ends, so the search runs in O(n).
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
329
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _a : """simple docstring""" def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ): A_ = parent A_ = 13 A_ = 7 A_ = True A_ = True A_ = True A_ = True A_ = 99 A_ = 384 A_ = 2 A_ = 4 A_ = 37 A_ = "gelu" A_ = 0.1 A_ = 0.1 A_ = 512 A_ = 16 A_ = 2 A_ = 0.02 A_ = 3 A_ = 4 A_ = 128 A_ = 2 A_ = 9 A_ = 1 A_ = None def __A ( self : Optional[int] ): A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_input_mask: A_ = random_attention_mask([self.batch_size, self.seq_length] ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = ids_tensor([self.batch_size] , self.num_choices ) A_ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ): A_ = TFConvBertModel(config=UpperCAmelCase ) A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ = [input_ids, input_mask] A_ = model(UpperCAmelCase ) A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : 
Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ): A_ = TFConvBertForMaskedLM(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ): A_ = self.num_labels A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ): A_ = self.num_choices A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ): A_ = self.num_labels A_ = TFConvBertForTokenClassification(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ): A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : List[str] ): A_ = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = config_and_inputs A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) _lowerCamelCase : Any = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': 
TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase : Dict = False _lowerCamelCase : Optional[int] = False _lowerCamelCase : Dict = False def __A ( self : List[str] ): A_ = TFConvBertModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def __A ( self : Tuple ): self.config_tester.run_common_tests() def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase ) def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase ) @slow def __A ( self : str ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True A_ = True if hasattr(UpperCAmelCase , "use_cache" ): A_ = True A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) for model_class in self.all_model_classes: A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) A_ = model_class(UpperCAmelCase ) A_ = len(model(UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase ) A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" ) A_ = tf.keras.models.load_model(UpperCAmelCase ) A_ = model(UpperCAmelCase ) if self.is_encoder_decoder: A_ = outputs["encoder_hidden_states"] A_ = outputs["encoder_attentions"] else: A_ = outputs["hidden_states"] A_ = outputs["attentions"] self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) A_ = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __A ( self : List[str] ): A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(UpperCAmelCase ) def __A ( self : Any ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ): A_ = len(UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) A_ = 
outputs.decoder_attentions self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ): A_ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: A_ = True A_ = False A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) A_ = len(UpperCAmelCase ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) if self.is_encoder_decoder: A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_decoder_attentions_output(UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) # Check attention is always last and order is fine A_ = True A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) @require_tf class _a ( unittest.TestCase ): """simple docstring""" @slow def __A ( self : Dict ): A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) A_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ = model(UpperCAmelCase )[0] A_ = [1, 6, 768] self.assertEqual(output.shape , UpperCAmelCase ) A_ = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
329
1
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
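Python's standard library ships the same RFC 3548 base16 codec, so the hand-rolled functions above can be sanity-checked against base64.b16encode / b16decode:

```python
import base64

payload = b"Hello World!"
encoded = base16_encode(payload)  # from the module above
assert encoded == base64.b16encode(payload).decode("ascii")  # "48656C6C6F20576F726C6421"
assert base16_decode(encoded) == payload                     # round trip restores the bytes
```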
329
from ...configuration_utils import PretrainedConfig from ...utils import logging __a :Dict = logging.get_logger(__name__) __a :int = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = 'realm' def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ): super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase ) # Common config A_ = vocab_size A_ = max_position_embeddings A_ = hidden_size A_ = retriever_proj_size A_ = num_hidden_layers A_ = num_attention_heads A_ = num_candidates A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = initializer_range A_ = type_vocab_size A_ = layer_norm_eps # Reader config A_ = span_hidden_size A_ = max_span_width A_ = reader_layer_norm_eps A_ = reader_beam_size A_ = reader_seq_len # Retrieval config A_ = num_block_records A_ = searcher_beam_size
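The config can be instantiated directly to inspect or override the REALM defaults. A minimal sketch, assuming a transformers release that ships RealmConfig:

```python
from transformers import RealmConfig

config = RealmConfig()  # defaults mirror the __init__ above
print(config.vocab_size, config.num_candidates, config.reader_beam_size)  # 30522 8 5

small = RealmConfig(num_hidden_layers=4, hidden_size=256)  # override common fields
print(small.num_hidden_layers, small.hidden_size)  # 4 256
```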
329
1
__a :int = 256 # Modulus to hash a string __a :Union[str, Any] = 100_0003 def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ): """simple docstring""" A_ = len(__UpperCamelCase ) A_ = len(__UpperCamelCase ) if p_len > t_len: return False A_ = 0 A_ = 0 A_ = 1 # Calculating the hash of pattern and substring of text for i in range(__UpperCamelCase ): A_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus A_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue A_ = (modulus_power * alphabet_size) % modulus for i in range(0 ,t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash A_ = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def __snake_case ( ): """simple docstring""" A_ = "abc1abc12" A_ = "alskfjaldsabc1abc1abc12k23adsfabcabc" A_ = "alskfjaldsk23adsfabcabc" assert rabin_karp(__UpperCamelCase ,__UpperCamelCase ) and not rabin_karp(__UpperCamelCase ,__UpperCamelCase ) # Test 2) A_ = "ABABX" A_ = "ABABZABABYABABX" assert rabin_karp(__UpperCamelCase ,__UpperCamelCase ) # Test 3) A_ = "AAAB" A_ = "ABAAAAAB" assert rabin_karp(__UpperCamelCase ,__UpperCamelCase ) # Test 4) A_ = "abcdabcy" A_ = "abcxabcdabxabcdabcdabcy" assert rabin_karp(__UpperCamelCase ,__UpperCamelCase ) # Test 5) A_ = "Lü" A_ = "Lüsai" assert rabin_karp(__UpperCamelCase ,__UpperCamelCase ) A_ = "Lue" assert not rabin_karp(__UpperCamelCase ,__UpperCamelCase ) print("Success." ) if __name__ == "__main__": test_rabin_karp()
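The core trick is the rolling-hash update: subtract the leading character's contribution, shift by the alphabet size, and add the next character, all modulo a large prime; the direct string comparison guards against hash collisions. A cleaned-up, self-contained sketch of the same search with readable names (the mangled A_ bindings above obscure them):

```python
ALPHABET_SIZE = 256
MODULUS = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len, t_len = len(pattern), len(text)
    if p_len > t_len:
        return False

    p_hash = t_hash = 0
    power = 1  # becomes ALPHABET_SIZE ** (p_len - 1) % MODULUS
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * ALPHABET_SIZE) % MODULUS
        t_hash = (ord(text[i]) + t_hash * ALPHABET_SIZE) % MODULUS
        if i != p_len - 1:
            power = (power * ALPHABET_SIZE) % MODULUS

    for i in range(t_len - p_len + 1):
        if t_hash == p_hash and text[i : i + p_len] == pattern:  # confirm: hashes can collide
            return True
        if i != t_len - p_len:
            # rolling update: drop text[i], shift left, append text[i + p_len]
            t_hash = ((t_hash - ord(text[i]) * power) * ALPHABET_SIZE + ord(text[i + p_len])) % MODULUS
    return False


assert rabin_karp("abc1abc12", "alskfjaldsabc1abc1abc12k23adsfabcabc")
assert not rabin_karp("abc1abc12", "alskfjaldsk23adsfabcabc")
```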
329
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a :Optional[Any] = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = original_name.split("." )[0] A_ = key.split("." ) A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] ) A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] ) A_ = orig_block_num - offset A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" A_ = OrderedDict() A_ , A_ = 0, 0 for key, value in state_dict.items(): if key.startswith("network" ): A_ = key.replace("network" ,"poolformer.encoder" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("bias" ) and "patch_embed" not in key: patch_emb_offset += 1 A_ = key[: key.find("proj" )] A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' ) A_ = key.replace("proj" ,"projection" ) if key.endswith("bias" ): total_embed_found += 1 if "patch_embeddings" in key: A_ = "poolformer.encoder." + key if "mlp.fc1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" ) if "mlp.fc2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" ) if "norm1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" ) if "norm2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" ) if "layer_scale_1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" ) if "layer_scale_2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" ) if "head" in key: A_ = key.replace("head" ,"classifier" ) A_ = value return new_state_dict def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return image @torch.no_grad() def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ): """simple docstring""" A_ = PoolFormerConfig() # set attributes based on model_name A_ = "huggingface/label-files" A_ = model_name[-3:] A_ = 1000 A_ = "imagenet-1k-id2label.json" A_ = (1, 1000) # set config attributes A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} if size == "s12": A_ = [2, 2, 6, 2] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 0.9 elif size == "s24": A_ = [4, 4, 12, 4] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 0.9 elif size == "s36": A_ = [6, 6, 18, 6] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 1E-6 A_ = 0.9 elif size == "m36": A_ = [6, 6, 18, 6] A_ = [96, 192, 384, 768] A_ = 4.0 A_ = 1E-6 A_ = 0.95 elif size == "m48": A_ = [8, 8, 24, 8] A_ = [96, 192, 384, 768] A_ = 4.0 A_ = 1E-6 A_ = 
0.95 else: raise ValueError(f'''Size {size} not supported''' ) # load image processor A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase ) # Prepare image A_ = prepare_img() A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values logger.info(f'''Converting model {model_name}...''' ) # load original state dict A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) ) # rename keys A_ = rename_keys(__UpperCamelCase ) # create HuggingFace model and load state dict A_ = PoolFormerForImageClassification(__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) model.eval() # Define image processor A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase ) A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values # forward pass A_ = model(__UpperCamelCase ) A_ = outputs.logits # define expected logit slices for different models if size == "s12": A_ = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": A_ = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": A_ = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": A_ = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": A_ = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(f'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 ) # finally, save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __a :int = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
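Once converted, the checkpoint loads like any other image classifier; already-converted weights are also published on the Hub. A sketch, assuming the sail/poolformer_s12 repo name and an image on disk:

```python
import torch
from PIL import Image
from transformers import PoolFormerForImageClassification, PoolFormerImageProcessor

processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")

inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")  # illustrative path
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```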
329
1
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
329
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : torch.FloatTensor _lowerCamelCase : Optional[torch.FloatTensor] = None def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCamelCase : Any ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCamelCase : int ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) A_ = [] for i in range(__UpperCamelCase ): A_ = i / num_diffusion_timesteps A_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) ) return torch.tensor(__UpperCamelCase ,dtype=torch.floataa ) class _a ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) A_ = betas_for_alpha_bar(UpperCAmelCase ) A_ = 1.0 - self.betas A_ = torch.cumprod(self.alphas , dim=0 ) A_ = torch.tensor(1.0 ) # standard deviation of the initial noise distribution A_ = 1.0 # setable values A_ = None A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() ) A_ = variance_type def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ): return sample def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ): A_ = num_inference_steps A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ): if prev_timestep is None: A_ = t - 1 A_ = self.alphas_cumprod[t] A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one A_ = 1 - alpha_prod_t A_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: A_ = self.betas[t] else: A_ = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample A_ = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: A_ = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) ) A_ = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler A_ = 
variance.log() A_ = beta.log() A_ = (predicted_variance + 1) / 2 A_ = frac * max_log + (1 - frac) * min_log return variance def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ): A_ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 ) else: A_ = None # 1. compute alphas, betas if prev_timestep is None: A_ = t - 1 A_ = self.alphas_cumprod[t] A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one A_ = 1 - alpha_prod_t A_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: A_ = self.betas[t] A_ = self.alphas[t] else: A_ = 1 - alpha_prod_t / alpha_prod_t_prev A_ = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": A_ = model_output else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: A_ = torch.clamp( UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise A_ = 0 if t > 0: A_ = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device ) A_ = self._get_variance( UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , ) if self.variance_type == "fixed_small_log": A_ = variance elif self.variance_type == "learned_range": A_ = (0.5 * variance).exp() else: raise ValueError( f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' " for the UnCLIPScheduler." ) A_ = variance * variance_noise A_ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) A_ = timesteps.to(original_samples.device ) A_ = alphas_cumprod[timesteps] ** 0.5 A_ = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): A_ = sqrt_alpha_prod.unsqueeze(-1 ) A_ = (1 - alphas_cumprod[timesteps]) ** 0.5 A_ = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
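The add_noise method at the end is the closed-form forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise. A self-contained numerical illustration of that formula (a linear beta schedule for simplicity; the scheduler above uses the squared-cosine schedule):

```python
import torch

betas = torch.linspace(1e-4, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(2, 3, 8, 8)   # toy "clean" samples
noise = torch.randn_like(x0)
t = torch.tensor([10, 500])    # one timestep per sample

sqrt_ac = alphas_cumprod[t].sqrt().view(-1, 1, 1, 1)
sqrt_one_minus_ac = (1 - alphas_cumprod[t]).sqrt().view(-1, 1, 1, 1)
xt = sqrt_ac * x0 + sqrt_one_minus_ac * noise

# Early timesteps stay close to x0; late ones are mostly noise (holds almost surely).
print((xt[0] - x0[0]).abs().mean().item() < (xt[1] - x0[1]).abs().mean().item())
```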
329
1
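A minimal standalone sketch (mine, not part of the dump) of the "learned_range" branch in the scheduler above: the model's predicted value in [-1, 1] is mapped to a fraction that interpolates between the log of the small fixed variance and the log of beta. The function name is my own.

import torch

# Hypothetical helper mirroring the "learned_range" interpolation above:
# predicted_variance in [-1, 1] becomes frac in [0, 1], which blends
# log(min_variance) and log(beta); exponentiate to recover the variance.
def learned_range_log_variance(
    variance: torch.Tensor, beta: torch.Tensor, predicted_variance: torch.Tensor
) -> torch.Tensor:
    min_log = variance.log()
    max_log = beta.log()
    frac = (predicted_variance + 1) / 2
    return frac * max_log + (1 - frac) * min_log

log_var = learned_range_log_variance(torch.tensor(1e-4), torch.tensor(1e-2), torch.tensor(0.5))
print(log_var.exp())  # ~3.16e-3, between the 1e-4 floor and the 1e-2 ceiling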
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _a : """simple docstring""" def __init__( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Dict=2 , UpperCAmelCase : Tuple=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Any=7 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[int]=99 , UpperCAmelCase : Dict=36 , UpperCAmelCase : Optional[int]=2 , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Any=0.02 , UpperCAmelCase : int=6 , UpperCAmelCase : Optional[int]=6 , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Any=None , UpperCAmelCase : Any=1000 , ): A_ = parent A_ = batch_size A_ = num_channels A_ = image_size A_ = patch_size A_ = is_training A_ = use_input_mask A_ = use_token_type_ids A_ = use_labels A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = type_vocab_size A_ = type_sequence_label_size A_ = initializer_range A_ = coordinate_size A_ = shape_size A_ = num_labels A_ = num_choices A_ = scope A_ = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) A_ = text_seq_length A_ = (image_size // patch_size) ** 2 + 1 A_ = self.text_seq_length + self.image_seq_length def __A ( self : Optional[int] ): A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) A_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) A_ = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A_ = bbox[i, j, 3] A_ = bbox[i, j, 1] A_ = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: A_ = bbox[i, j, 2] A_ = bbox[i, j, 0] A_ = tmp_coordinate A_ = tf.constant(UpperCAmelCase ) A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ = None if self.use_input_mask: A_ = random_attention_mask([self.batch_size, self.text_seq_length] ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.text_seq_length] , 
self.type_vocab_size ) A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) A_ = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def __A ( self : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : str ): A_ = TFLayoutLMvaModel(config=UpperCAmelCase ) # text + image A_ = model(UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase ) A_ = model( UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , training=UpperCAmelCase , ) A_ = model(UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only A_ = model(UpperCAmelCase , training=UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only A_ = model({"pixel_values": pixel_values} , training=UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def __A ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] ): A_ = self.num_labels A_ = TFLayoutLMvaForSequenceClassification(config=UpperCAmelCase ) A_ = model( UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] ): A_ = self.num_labels A_ = TFLayoutLMvaForTokenClassification(config=UpperCAmelCase ) A_ = model( UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , training=UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def __A ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] ): A_ = 2 A_ = TFLayoutLMvaForQuestionAnswering(config=UpperCAmelCase ) A_ = model( 
UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , training=UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : List[Any] ): A_ = self.prepare_config_and_inputs() ((A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_) , (A_)) = config_and_inputs A_ = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_tf class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : str = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) _lowerCamelCase : Optional[int] = ( {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel} if is_tf_available() else {} ) _lowerCamelCase : Any = False _lowerCamelCase : str = False _lowerCamelCase : str = False def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int ): return True def __A ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Tuple=False ): A_ = copy.deepcopy(UpperCAmelCase ) if model_class in get_values(UpperCAmelCase ): A_ = { k: tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(UpperCAmelCase , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(UpperCAmelCase ): A_ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCAmelCase ): A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCAmelCase ): A_ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(UpperCAmelCase ): A_ = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def __A ( self : Union[str, Any] ): A_ = TFLayoutLMvaModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def __A ( self : int ): self.config_tester.run_common_tests() def __A ( self : Optional[int] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ = model_class(UpperCAmelCase ) if getattr(UpperCAmelCase , "hf_compute_loss" , UpperCAmelCase ): # The number of elements in the loss should be the same as the number of elements in the label A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase ) A_ = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=UpperCAmelCase )[0] ] A_ = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase ) A_ = prepared_for_class.pop("input_ids" ) A_ = model(UpperCAmelCase , **UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == 
[1] ) # Test that model correctly compute the loss when we mask some positions A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase ) A_ = prepared_for_class.pop("input_ids" ) if "labels" in prepared_for_class: A_ = prepared_for_class["labels"].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: A_ = -100 A_ = tf.convert_to_tensor(UpperCAmelCase ) A_ = model(UpperCAmelCase , **UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase ) A_ = model(UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple A_ = self._prepare_for_class(inputs_dict.copy() , UpperCAmelCase , return_labels=UpperCAmelCase ) # Get keys that were added with the _prepare_for_class function A_ = prepared_for_class.keys() - inputs_dict.keys() A_ = inspect.signature(model.call ).parameters A_ = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple A_ = {0: "input_ids"} for label_key in label_keys: A_ = signature_names.index(UpperCAmelCase ) A_ = label_key A_ = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple A_ = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: A_ = prepared_for_class[value] A_ = tuple(UpperCAmelCase ) # Send to model A_ = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def __A ( self : Any ): ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def __A ( self : Optional[int] ): ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ = type self.model_tester.create_and_check_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def __A ( self : Any ): ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def __A ( self : int ): ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def __A ( self : Union[str, Any] ): ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) @slow 
def __A ( self : Dict ): for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = TFLayoutLMvaModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) def __snake_case ( ): """simple docstring""" A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf class _a ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self : Optional[Any] ): return LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase ) if is_vision_available() else None @slow def __A ( self : int ): A_ = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ) A_ = self.default_image_processor A_ = prepare_img() A_ = image_processor(images=UpperCAmelCase , return_tensors="tf" ).pixel_values A_ = tf.constant([[1, 2]] ) A_ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass A_ = model(input_ids=UpperCAmelCase , bbox=UpperCAmelCase , pixel_values=UpperCAmelCase , training=UpperCAmelCase ) # verify the logits A_ = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase ) A_ = tf.constant( [[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=1E-4 ) )
329
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid-integers p^q * q^p (p, q distinct primes) not exceeding base^degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
329
1
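A brute-force cross-check (my own sketch, not from the dump) of the counting condition used by the two-pointer loop above: p^q * q^p <= base^degree exactly when q*log2(p) + p*log2(q) <= degree*log2(base).

from math import log2

# Count pairs of distinct primes p < q satisfying the same log-domain
# condition the two-pointer loop evaluates, on a toy bound.
def brute_force_count(primes: list[int], upper_bound: float) -> int:
    count = 0
    for i, p in enumerate(primes):
        for q in primes[i + 1 :]:
            if q * log2(p) + p * log2(q) <= upper_bound:
                count += 1
    return count

primes = [2, 3, 5, 7, 11, 13]
print(brute_force_count(primes, 20 * log2(20)))  # all 15 pairs qualify for this toy bound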
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Element-wise tanh via its rational form: (2 / (1 + e^(-2x))) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
329
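A quick sanity check (my own) that the rational form above agrees with NumPy's built-in tanh:

import numpy as np

vector = np.array([1.0, 5.0, -0.1])
manual = (2 / (1 + np.exp(-2 * vector))) - 1
assert np.allclose(manual, np.tanh(vector))
print(manual)  # [ 0.76159416  0.9999092  -0.09966799]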
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __a :str = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ): """simple docstring""" A_ = RobertaPreLayerNormConfig.from_pretrained( __UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] ) # convert state_dict A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) ) A_ = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith("roberta." ): A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ): continue A_ = tensor_value A_ = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) # convert tokenizer A_ = AutoTokenizer.from_pretrained(__UpperCamelCase ) tokenizer.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint-repo', default=None, type=str, required=True, help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a :Any = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
329
1
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
329
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Liouville's lambda: (-1) ** Omega(number), where Omega counts prime
    factors with multiplicity."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
329
1
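The `maths.prime_factors` import above is repo-local; a self-contained sketch (my own helper) of the same function:

# Liouville's lambda without the repo-local import: factorise by trial
# division, then return (-1) ** (number of prime factors with multiplicity).
def prime_factors(n: int) -> list[int]:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

print([-1 if len(prime_factors(n)) % 2 else 1 for n in range(1, 11)])
# [1, -1, -1, 1, -1, 1, -1, -1, 1, 1]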
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
329
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
329
1
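A hypothetical demonstration (my own hand-made path list) of the four checks the script above runs:

import os

paths = ["maths/tanh.py", "Maths/Tanh.py", "maths/two sum.py", "maths/two-sum.py", "license"]
print([p for p in paths if p != p.lower()])   # uppercase: ['Maths/Tanh.py']
print([p for p in paths if " " in p])         # spaces:    ['maths/two sum.py']
print([p for p in paths if "-" in p])         # hyphens:   ['maths/two-sum.py']
print([p for p in paths if os.sep not in p])  # no dir (POSIX): ['license']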
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __a :Optional[Any] = logging.get_logger(__name__) __a :Any = {'vocab_file': 'vocab.txt'} __a :Any = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __a :List[str] = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __a :List[str] = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = VOCAB_FILES_NAMES _lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : Union[str, Any] = ConvBertTokenizer def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ): super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , ) A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars ): A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) ) A_ = do_lower_case A_ = strip_accents A_ = tokenize_chinese_chars A_ = normalizer_class(**UpperCAmelCase ) A_ = do_lower_case def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ): A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase )
329
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
329
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a :List[str] = logging.get_logger(__name__) __a :Optional[Any] = { 'microsoft/unispeech-sat-base-100h-libri-ft': ( 'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json' ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = 'unispeech-sat' def __init__( self : Tuple , UpperCAmelCase : int=32 , UpperCAmelCase : Optional[int]=768 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : Tuple=12 , UpperCAmelCase : List[str]=3072 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : int=0.0 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Dict=0.02 , UpperCAmelCase : int=1E-5 , UpperCAmelCase : Optional[int]="group" , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : Dict=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase : int=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase : List[str]=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : str=128 , UpperCAmelCase : Optional[int]=16 , UpperCAmelCase : str=False , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Dict=0.05 , UpperCAmelCase : Optional[Any]=10 , UpperCAmelCase : Dict=2 , UpperCAmelCase : Optional[int]=0.0 , UpperCAmelCase : int=10 , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : int=320 , UpperCAmelCase : Dict=2 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Any=100 , UpperCAmelCase : Tuple=256 , UpperCAmelCase : int=256 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Dict="mean" , UpperCAmelCase : List[str]=False , UpperCAmelCase : Any=False , UpperCAmelCase : Optional[Any]=256 , UpperCAmelCase : Optional[int]=(512, 512, 512, 512, 1500) , UpperCAmelCase : Union[str, Any]=(5, 3, 3, 1, 1) , UpperCAmelCase : Any=(1, 2, 3, 1, 1) , UpperCAmelCase : Dict=512 , UpperCAmelCase : Any=0 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : int=2 , UpperCAmelCase : List[str]=504 , **UpperCAmelCase : str , ): super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase ) A_ = hidden_size A_ = feat_extract_norm A_ = feat_extract_activation A_ = list(UpperCAmelCase ) A_ = list(UpperCAmelCase ) A_ = list(UpperCAmelCase ) A_ = conv_bias A_ = num_conv_pos_embeddings A_ = num_conv_pos_embedding_groups A_ = len(self.conv_dim ) A_ = num_hidden_layers A_ = intermediate_size A_ = hidden_act A_ = num_attention_heads A_ = hidden_dropout A_ = attention_dropout A_ = activation_dropout A_ = feat_proj_dropout A_ = final_dropout A_ = layerdrop A_ = layer_norm_eps A_ = initializer_range A_ = vocab_size A_ = num_clusters A_ = do_stable_layer_norm A_ = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ = apply_spec_augment A_ = mask_time_prob A_ = mask_time_length A_ = mask_time_min_masks A_ = mask_feature_prob A_ = mask_feature_length A_ = mask_feature_min_masks # parameters for pretraining with codevector quantized representations A_ = num_codevectors_per_group A_ = num_codevector_groups A_ = contrastive_logits_temperature A_ = feat_quantizer_dropout A_ = num_negatives A_ = codevector_dim A_ = proj_codevector_dim A_ = diversity_loss_weight # ctc loss A_ = ctc_loss_reduction A_ = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. A_ = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. A_ = list(UpperCAmelCase ) A_ = list(UpperCAmelCase ) A_ = list(UpperCAmelCase ) A_ = xvector_output_dim @property def __A ( self : List[str] ): return functools.reduce(operator.mul , self.conv_stride , 1 )
329
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ): return False return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule ) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ): """simple docstring""" A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) A_ = is_compiled_module(__UpperCamelCase ) if is_compiled: A_ = model A_ = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = model.module if not keep_fpaa_wrapper: A_ = getattr(__UpperCamelCase ,"forward" ) A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase ) if original_forward is not None: while hasattr(__UpperCamelCase ,"__wrapped__" ): A_ = forward.__wrapped__ if forward == original_forward: break A_ = forward if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ): convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase ) if is_compiled: A_ = model A_ = compiled_model return model def __snake_case ( ): """simple docstring""" PartialState().wait_for_everyone() def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ): """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(__UpperCamelCase ,__UpperCamelCase ) elif PartialState().local_process_index == 0: torch.save(__UpperCamelCase ,__UpperCamelCase ) @contextmanager def __snake_case ( **__UpperCamelCase : Any ): """simple docstring""" for key, value in kwargs.items(): A_ = str(__UpperCamelCase ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ): A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase ) if hasattr(__UpperCamelCase ,"__qualname__" ): return obj.__qualname__ if hasattr(__UpperCamelCase ,"__name__" ): return obj.__name__ return str(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ): """simple docstring""" for key, value in source.items(): if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = destination.setdefault(__UpperCamelCase ,{} ) merge_dicts(__UpperCamelCase ,__UpperCamelCase ) else: A_ = value return destination def __snake_case ( __UpperCamelCase : int = None ): """simple docstring""" if port is None: A_ = 2_9500 with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
329
1
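The context manager near the end of the blob above temporarily patches environment variables for the duration of a `with` block. A standalone sketch under my own names (it appears to correspond to accelerate's `patch_environment` helper):

import os
from contextlib import contextmanager

# Set upper-cased env vars on entry, delete them on exit; mirrors the
# contextmanager above (pre-existing values are not restored, as there).
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]

with patch_environment(master_port=29501):
    print(os.environ["MASTER_PORT"])  # "29501"
print("MASTER_PORT" in os.environ)    # False once the block exits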
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    """Recursive binary search over a sorted list."""
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
329
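A usage check (my own) for the recursive search above; note the input must already be sorted:

sequence = [1, 3, 5, 7, 9, 11]
assert binary_search(sequence, 7)       # hit: midpoint lands on 7 directly
assert not binary_search(sequence, 4)   # miss: recursion bottoms out on []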
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : int ): A_ = tempfile.mkdtemp() A_ = BlipImageProcessor() A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer def __A ( self : Optional[Any] , **UpperCAmelCase : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor def __A ( self : Any ): shutil.rmtree(self.tmpdirname ) def __A ( self : Dict ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Any ): A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) A_ = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : Dict ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : int ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = processor(text=UpperCAmelCase ) A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : Any ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = 
processor.batch_decode(UpperCAmelCase ) A_ = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
329
1
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
329
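What the assertion above checks, spelled out with one of its own parametrized cases (values chosen by me):

from urllib.parse import quote

repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
url = f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
print(url)
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv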
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours when `taken` balls are drawn at
    random from NUM_BALLS balls (Project Euler 493)."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
329
1
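A Monte Carlo cross-check (my own sketch) of the closed-form expectation above: draw 20 of the 70 balls and count distinct colours, averaged over many trials.

import random

# 7 colours, 10 balls each; the sample mean should approach ~6.8187,
# the value the combinatorial formula above computes exactly.
def simulate(trials: int = 100_000, taken: int = 20) -> float:
    balls = [colour for colour in range(7) for _ in range(10)]
    return sum(len(set(random.sample(balls, taken))) for _ in range(trials)) / trials

print(simulate())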
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str]=False ): """simple docstring""" A_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("module.cls_token", "vit.embeddings.cls_token"), ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("module.pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("module.norm.weight", "layernorm.weight"), ("module.norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[Any]=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: A_ = "" else: A_ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' ) A_ = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A_ = in_proj_weight[ : config.hidden_size, : ] A_ = in_proj_bias[: config.hidden_size] A_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ = in_proj_weight[ -config.hidden_size :, : ] A_ = in_proj_bias[-config.hidden_size :] def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" A_ = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", "module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = dct.pop(__UpperCamelCase ) A_ = val def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[Any] ): """simple docstring""" A_ = ViTMSNConfig() A_ = 1000 A_ = "datasets/huggingface/label-files" A_ = "imagenet-1k-id2label.json" A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: A_ = 384 A_ = 1536 A_ = 6 elif "l16" in checkpoint_url: A_ = 1024 A_ = 4096 A_ = 24 A_ = 16 A_ = 0.1 elif "b4" in checkpoint_url: A_ = 4 elif "l7" in checkpoint_url: A_ = 7 A_ = 1024 A_ = 4096 A_ = 24 A_ = 16 A_ = 0.1 A_ = ViTMSNModel(__UpperCamelCase ) A_ = torch.hub.load_state_dict_from_url(__UpperCamelCase ,map_location="cpu" )["target_encoder"] A_ = ViTImageProcessor(size=config.image_size ) remove_projection_head(__UpperCamelCase ) A_ = create_rename_keys(__UpperCamelCase ,base_model=__UpperCamelCase ) for src, dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) read_in_q_k_v(__UpperCamelCase ,__UpperCamelCase ,base_model=__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) model.eval() A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) A_ = ViTImageProcessor( size=config.image_size ,image_mean=__UpperCamelCase ,image_std=__UpperCamelCase ) A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ) # forward pass torch.manual_seed(2 ) A_ = model(**__UpperCamelCase ) A_ = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: A_ = torch.tensor([[-1.0915, -1.4876, -1.1809]] ) elif "b16" in checkpoint_url: A_ = torch.tensor([[14.2889, -18.9045, 11.7281]] ) elif "l16" in checkpoint_url: A_ = torch.tensor([[41.5028, -22.8681, 45.6475]] ) elif "b4" in 
checkpoint_url: A_ = torch.tensor([[-4.3868, 5.2932, -0.4137]] ) else: A_ = torch.tensor([[-0.1792, -0.6465, 2.4263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] ,__UpperCamelCase ,atol=1E-4 ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __a :Optional[int] = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
329
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))  # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))  # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))  # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix[:] = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
329
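A quick demonstration (my own) on a 2x2 matrix; the helpers mutate their argument in place, so each call gets a fresh copy:

m = [[1, 2], [3, 4]]
print(rotate_90([row[:] for row in m]))   # [[2, 4], [1, 3]]
print(rotate_180([row[:] for row in m]))  # [[4, 3], [2, 1]]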
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
329
1
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __a :List[str] = logging.getLogger(__name__) def __snake_case ( __UpperCamelCase : torch.nn.Module ,__UpperCamelCase : BnbQuantizationConfig ,__UpperCamelCase : Union[str, os.PathLike] = None ,__UpperCamelCase : Optional[Dict[str, Union[int, str, torch.device]]] = None ,__UpperCamelCase : Optional[List[str]] = None ,__UpperCamelCase : Optional[Dict[Union[int, str], Union[int, str]]] = None ,__UpperCamelCase : Optional[Union[str, os.PathLike]] = None ,__UpperCamelCase : bool = False ,): """simple docstring""" A_ = bnb_quantization_config.load_in_abit A_ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." ) A_ = [] # custom device map if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(device_map.keys() ) > 1: A_ = [key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: A_ = get_keys_to_not_convert(__UpperCamelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__UpperCamelCase ) A_ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: A_ = [] A_ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__UpperCamelCase ) # compatibility with peft A_ = load_in_abit A_ = load_in_abit A_ = get_parameter_device(__UpperCamelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." 
) A_ = replace_with_bnb_layers(__UpperCamelCase ,__UpperCamelCase ,modules_to_not_convert=__UpperCamelCase ) # convert param to the right dtype A_ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: A_ = name.replace(".weight" ,"" ).replace(".bias" ,"" ) A_ = getattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__UpperCamelCase ): param.to(__UpperCamelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' "We move the model to cuda." ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): A_ = replace_with_bnb_layers( __UpperCamelCase ,__UpperCamelCase ,modules_to_not_convert=__UpperCamelCase ) A_ = get_quantized_model_device_map( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,max_memory=__UpperCamelCase ,no_split_module_classes=__UpperCamelCase ,) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): A_ = True A_ = any(x in list(device_map.values() ) for x in ["cpu", "disk"] ) load_checkpoint_in_model( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=__UpperCamelCase ,offload_state_dict=__UpperCamelCase ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,) return dispatch_model(__UpperCamelCase ,device_map=__UpperCamelCase ,offload_dir=__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : int ,__UpperCamelCase : Tuple=None ,__UpperCamelCase : str=None ,__UpperCamelCase : int=None ): """simple docstring""" if device_map is None: if torch.cuda.is_available(): A_ = {"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) A_ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) A_ = {} A_ = special_dtypes A_ = no_split_module_classes A_ = bnb_quantization_config.target_dtype # get max_memory for each device. 
if device_map != "sequential": A_ = get_balanced_memory( __UpperCamelCase ,low_zero=(device_map == "balanced_low_0") ,max_memory=__UpperCamelCase ,**__UpperCamelCase ,) A_ = max_memory A_ = infer_auto_device_map(__UpperCamelCase ,**__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ): # check if don't have any quantized module on the cpu A_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules A_ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[Any]=None ,__UpperCamelCase : List[str]=None ): """simple docstring""" if modules_to_not_convert is None: A_ = [] A_ , A_ = _replace_with_bnb_layers( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Union[str, Any]=None ,__UpperCamelCase : Optional[Any]=None ,): """simple docstring""" A_ = False for name, module in model.named_children(): if current_key_name is None: A_ = [] current_key_name.append(__UpperCamelCase ) if isinstance(__UpperCamelCase ,nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` A_ = ".".join(__UpperCamelCase ) A_ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: A_ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: A_ = bnb.nn.LinearabitLt( module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=__UpperCamelCase ,threshold=bnb_quantization_config.llm_inta_threshold ,) elif bnb_quantization_config.load_in_abit: A_ = bnb.nn.Linearabit( module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False" ) A_ = module.weight.data if module.bias is not None: A_ = module.bias.data bnb_module.requires_grad_(__UpperCamelCase ) setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) A_ = True if len(list(module.children() ) ) > 0: A_ , A_ = _replace_with_bnb_layers( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) A_ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" with init_empty_weights(): A_ = deepcopy(__UpperCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` A_ = find_tied_parameters(__UpperCamelCase ) # For compatibility with Accelerate < 0.18 if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() ) else: A_ = sum(__UpperCamelCase ,[] ) A_ = len(__UpperCamelCase ) > 0 # Check if it is a base model A_ = False if hasattr(__UpperCamelCase ,"base_model_prefix" ): A_ = not hasattr(__UpperCamelCase ,model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A_ = list(model.named_children() ) A_ = [list_modules[-1][0]] # add last module together with tied weights A_ = set(__UpperCamelCase ) - set(__UpperCamelCase ) A_ = list(set(__UpperCamelCase ) ) + list(__UpperCamelCase ) # remove ".weight" from the keys A_ = [".weight", ".bias"] A_ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A_ = name.replace(__UpperCamelCase ,"" ) filtered_module_names.append(__UpperCamelCase ) return filtered_module_names def __snake_case ( __UpperCamelCase : str ): """simple docstring""" for m in model.modules(): if isinstance(__UpperCamelCase ,bnb.nn.Linearabit ): return True return False def __snake_case ( __UpperCamelCase : nn.Module ): """simple docstring""" return next(parameter.parameters() ).device def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ): """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(__UpperCamelCase ,__UpperCamelCase ,0 ,dtype=__UpperCamelCase ,value=__UpperCamelCase ) A_ = param_name A_ = model if "." in tensor_name: A_ = tensor_name.split("." 
) for split in splits[:-1]: A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) A_ = new_module A_ = splits[-1] # offload weights A_ = False offload_weight(module._parameters[tensor_name] ,__UpperCamelCase ,__UpperCamelCase ,index=__UpperCamelCase ) if hasattr(module._parameters[tensor_name] ,"SCB" ): offload_weight( module._parameters[tensor_name].SCB ,param_name.replace("weight" ,"SCB" ) ,__UpperCamelCase ,index=__UpperCamelCase ,) else: offload_weight(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,index=__UpperCamelCase ) offload_weight(__UpperCamelCase ,param_name.replace("weight" ,"SCB" ) ,__UpperCamelCase ,index=__UpperCamelCase ) set_module_tensor_to_device(__UpperCamelCase ,__UpperCamelCase ,"meta" ,dtype=__UpperCamelCase ,value=torch.empty(*param.size() ) )
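# Hedged sketch (not from the original module): the recursive Linear-replacement
# pattern that the bnb layer-swapping helper above implements, shown with a
# stand-in class so it runs without `bitsandbytes`. `QuantStub`, `replace_linears`
# and the toy model are illustrative names, not accelerate API.
import torch.nn as nn


class QuantStub(nn.Linear):
    """Placeholder for a quantized linear layer such as bnb.nn.Linear8bitLt."""


def replace_linears(model: nn.Module, skip: list, prefix: str = "") -> nn.Module:
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if type(child) is nn.Linear and full_name not in skip:
            quantized = QuantStub(child.in_features, child.out_features, child.bias is not None)
            setattr(model, name, quantized)  # swap the module in place, as above
        else:
            replace_linears(child, skip, full_name)
    return model


toy = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
print(replace_linears(toy, skip=[]))  # both Linear layers become QuantStub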
329
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _a : """simple docstring""" @staticmethod def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ): pass @is_pipeline_test @require_vision class _a ( unittest.TestCase ): """simple docstring""" @require_torch def __A ( self : List[str] ): A_ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(UpperCAmelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @require_tf def __A ( self : int ): A_ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @slow @require_torch def __A ( self : Any ): A_ = pipeline( 
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def __A ( self : Optional[Any] ): A_ = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
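# Hedged usage sketch (not part of the test file): invoking the pipeline exactly
# as the slow test above does, outside the unittest harness.
from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "./tests/fixtures/tests_samples/COCO/000000039769.png",
    candidate_labels=["cat", "plane", "remote"],
)
print(preds)  # per the expectations above: remote ~0.511, cat ~0.485, plane ~0.004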
329
1
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[str] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Tuple=True ,__UpperCamelCase : Tuple="pt" ): """simple docstring""" A_ = {"add_prefix_space": True} if isinstance(__UpperCamelCase ,__UpperCamelCase ) and not line.startswith(" " ) else {} A_ = padding_side return tokenizer( [line] ,max_length=__UpperCamelCase ,padding="max_length" if pad_to_max_length else None ,truncation=__UpperCamelCase ,return_tensors=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,**__UpperCamelCase ,) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[int]=None ,): """simple docstring""" A_ = input_ids.ne(__UpperCamelCase ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class _a ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any]="train" , UpperCAmelCase : Any=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Dict="" , ): super().__init__() A_ = Path(UpperCAmelCase ).joinpath(type_path + ".source" ) A_ = Path(UpperCAmelCase ).joinpath(type_path + ".target" ) A_ = self.get_char_lens(self.src_file ) A_ = max_source_length A_ = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' A_ = tokenizer A_ = prefix if n_obs is not None: A_ = self.src_lens[:n_obs] A_ = src_lang A_ = tgt_lang def __len__( self : Union[str, Any] ): return len(self.src_lens ) def __getitem__( self : Optional[int] , UpperCAmelCase : List[str] ): A_ = index + 1 # linecache starts at 1 A_ = self.prefix + linecache.getline(str(self.src_file ) , UpperCAmelCase ).rstrip("\n" ) A_ = linecache.getline(str(self.tgt_file ) , UpperCAmelCase ).rstrip("\n" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , UpperCAmelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right A_ = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , UpperCAmelCase ) else self.tokenizer ) A_ = self.tokenizer.generator if isinstance(self.tokenizer , UpperCAmelCase ) else self.tokenizer A_ = encode_line(UpperCAmelCase , UpperCAmelCase , self.max_source_length , "right" ) A_ = encode_line(UpperCAmelCase , UpperCAmelCase , self.max_target_length , "right" ) A_ = source_inputs["input_ids"].squeeze() A_ = target_inputs["input_ids"].squeeze() A_ = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def __A ( UpperCAmelCase : Dict ): return [len(UpperCAmelCase ) for x in Path(UpperCAmelCase ).open().readlines()] def __A ( self : Optional[Any] , UpperCAmelCase : Optional[Any] ): A_ = 
torch.stack([x["input_ids"] for x in batch] ) A_ = torch.stack([x["attention_mask"] for x in batch] ) A_ = torch.stack([x["decoder_input_ids"] for x in batch] ) A_ = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , UpperCAmelCase ) else self.tokenizer.pad_token_id ) A_ = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , UpperCAmelCase ) else self.tokenizer.pad_token_id ) A_ = trim_batch(UpperCAmelCase , UpperCAmelCase ) A_ , A_ = trim_batch(UpperCAmelCase , UpperCAmelCase , attention_mask=UpperCAmelCase ) A_ = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch __a :int = getLogger(__name__) def __snake_case ( __UpperCamelCase : List[List] ): """simple docstring""" return list(itertools.chain.from_iterable(__UpperCamelCase ) ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = get_git_info() save_json(__UpperCamelCase ,os.path.join(__UpperCamelCase ,"git_log.json" ) ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Tuple=4 ,**__UpperCamelCase : int ): """simple docstring""" with open(__UpperCamelCase ,"w" ) as f: json.dump(__UpperCamelCase ,__UpperCamelCase ,indent=__UpperCamelCase ,**__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" with open(__UpperCamelCase ) as f: return json.load(__UpperCamelCase ) def __snake_case ( ): """simple docstring""" A_ = git.Repo(search_parent_directories=__UpperCamelCase ) A_ = { "repo_id": str(__UpperCamelCase ), "repo_sha": str(repo.head.object.hexsha ), "repo_branch": str(repo.active_branch ), "hostname": str(socket.gethostname() ), } return repo_infos def __snake_case ( __UpperCamelCase : Callable ,__UpperCamelCase : Iterable ): """simple docstring""" return list(map(__UpperCamelCase ,__UpperCamelCase ) ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Dict ): """simple docstring""" with open(__UpperCamelCase ,"wb" ) as f: return pickle.dump(__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" def remove_articles(__UpperCamelCase : Optional[int] ): return re.sub(R"\b(a|an|the)\b" ," " ,__UpperCamelCase ) def white_space_fix(__UpperCamelCase : List[Any] ): return " ".join(text.split() ) def remove_punc(__UpperCamelCase : Tuple ): A_ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(__UpperCamelCase : Union[str, Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__UpperCamelCase ) ) ) ) def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : str ): """simple docstring""" A_ = normalize_answer(__UpperCamelCase ).split() A_ = normalize_answer(__UpperCamelCase ).split() A_ = Counter(__UpperCamelCase ) & Counter(__UpperCamelCase ) A_ = sum(common.values() ) if num_same == 0: return 0 A_ = 1.0 * num_same / len(__UpperCamelCase ) A_ = 1.0 * num_same / len(__UpperCamelCase ) A_ = (2 * precision * recall) / (precision + recall) return fa def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[int] ): """simple docstring""" return normalize_answer(__UpperCamelCase ) == normalize_answer(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[str] ): """simple docstring""" assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = 0 for hypo, pred in zip(__UpperCamelCase ,__UpperCamelCase ): em += exact_match_score(__UpperCamelCase ,__UpperCamelCase ) 
if len(__UpperCamelCase ) > 0: em /= len(__UpperCamelCase ) return {"em": em} def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" return model_prefix.startswith("rag" ) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : str ): """simple docstring""" A_ = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead A_ = "dropout_rate" for p in extra_params: if getattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): if not hasattr(__UpperCamelCase ,__UpperCamelCase ) and not hasattr(__UpperCamelCase ,equivalent_param[p] ): logger.info("config doesn't have a `{}` attribute".format(__UpperCamelCase ) ) delattr(__UpperCamelCase ,__UpperCamelCase ) continue A_ = p if hasattr(__UpperCamelCase ,__UpperCamelCase ) else equivalent_param[p] setattr(__UpperCamelCase ,__UpperCamelCase ,getattr(__UpperCamelCase ,__UpperCamelCase ) ) delattr(__UpperCamelCase ,__UpperCamelCase ) return hparams, config
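# Hedged sketch (not from the original file): the token-level F1 that the
# name-mangled helpers above compute, minus normalize_answer's article and
# punctuation stripping, written out with explicit names.
from collections import Counter


def token_f1(prediction: str, ground_truth: str) -> float:
    pred_tokens = prediction.lower().split()
    gold_tokens = ground_truth.lower().split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)


print(token_f1("the cat sat", "a cat sat down"))  # ~0.571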
329
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ): """simple docstring""" A_ = [] for _ in range(__UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ): """simple docstring""" A_ = [] for step in range(__UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A_ = os.path.join(__UpperCamelCase ,"schedule.bin" ) torch.save(scheduler.state_dict() ,__UpperCamelCase ) A_ = torch.load(__UpperCamelCase ) scheduler.load_state_dict(__UpperCamelCase ) return lrs @require_torch class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ): self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ): self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase ) A_ = torch.tensor([0.4, 0.2, -0.5] ) A_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): A_ = criterion(UpperCAmelCase , UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def __A ( self : Dict ): A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase ) A_ = torch.tensor([0.4, 0.2, -0.5] ) A_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A_ = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , ) for _ in range(1000 ): A_ = criterion(UpperCAmelCase , UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None _lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _lowerCamelCase : Any = 1_0 def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ): self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ): self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A_ = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A_ , A_ = data A_ = scheduler_func(self.optimizer , **UpperCAmelCase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A_ = unwrap_schedule(UpperCAmelCase , self.num_steps ) self.assertListAlmostEqual( UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) A_ = scheduler_func(self.optimizer , **UpperCAmelCase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' ) class _a : """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : List[str] ): A_ = fn def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ): return self.fn(*UpperCAmelCase , **UpperCAmelCase ) @classmethod def __A ( self : Dict , UpperCAmelCase : List[str] ): A_ = list(map(self , scheduler.lr_lambdas ) )
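# Hedged usage sketch (not part of the test file): wiring one of the schedules
# under test to an optimizer, with the same warmup/training-step kwargs as above.
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(2))
optimizer = AdamW([param], lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = [scheduler.get_lr()[0]]
for _ in range(9):
    scheduler.step()
    lrs.append(scheduler.get_lr()[0])
print(lrs)  # ramps 0.0 -> 10.0 over the warmup, then decays linearly toward 0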
329
1
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among `num_picked` balls drawn from
    NUM_BALLS balls (BALLS_PER_COLOUR of each of NUM_COLOURS colours)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
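# Hedged check (not part of the original file): a direct Monte Carlo estimate of
# the same expectation, handy for sanity-checking the closed form above.
import random


def estimate(num_picked: int = 20, trials: int = 20_000) -> float:
    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    return sum(len(set(random.sample(balls, num_picked))) for _ in range(trials)) / trials

# estimate() hovers around 6.8187, in line with solution(20)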
329
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing """simple docstring""" return x.sum() def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing """simple docstring""" return i + 1 @dataclass class _a : """simple docstring""" _lowerCamelCase : int _lowerCamelCase : str class _a ( snake_case_ ): """simple docstring""" def __A ( self : Dict ): A_ = {} A_ = [] A_ = 1 A_ = [1, 2] A_ = {"a": 1, "b": 2} A_ = {"a": [1, 2], "b": [3, 4]} A_ = {"a": {"1": 1}, "b": 2} A_ = {"a": 1, "b": 2, "c": 3, "d": 4} A_ = {} A_ = [] A_ = 2 A_ = [2, 3] A_ = {"a": 2, "b": 3} A_ = {"a": [2, 3], "b": [4, 5]} A_ = {"a": {"1": 2}, "b": 3} A_ = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) A_ = 2 self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} A_ = {"a": 2, "b": 0, "c": 2} A_ = { "a": np.eye(2 ).astype(UpperCAmelCase ), "b": np.zeros(3 ).astype(UpperCAmelCase ), "c": np.ones(2 ).astype(UpperCAmelCase ), } self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(UpperCAmelCase ): # can't pickle 
a local lambda map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase ) def __A ( self : List[str] ): A_ = {"a": 1, "b": 2} A_ = {"a": 3, "b": 4} A_ = {"a": 5, "b": 6} A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase ) def __A ( self : Any ): class _a : """simple docstring""" _lowerCamelCase : int = 'bar' A_ = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" ,[ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] ,) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ): """simple docstring""" with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: A_ = {f'''{i}''': i for i in range(__UpperCamelCase )} A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class _a ( snake_case_ ): """simple docstring""" @require_tf def __A ( self : Union[str, Any] ): import tensorflow as tf from tensorflow.keras import layers A_ = layers.Dense(2 ) def gen_random_output(): A_ = tf.random.uniform((1, 3) ) return model(UpperCAmelCase ).numpy() with temp_seed(42 , set_tensorflow=UpperCAmelCase ): A_ = gen_random_output() with temp_seed(42 , set_tensorflow=UpperCAmelCase ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __A ( self : Optional[int] ): import torch def gen_random_output(): A_ = torch.nn.Linear(3 , 2 ) A_ = torch.rand(1 , 3 ) return model(UpperCAmelCase ).detach().numpy() with temp_seed(42 , set_pytorch=UpperCAmelCase ): A_ = gen_random_output() with temp_seed(42 , set_pytorch=UpperCAmelCase ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __A ( self : Any ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): A_ = gen_random_output() with temp_seed(42 ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data" ,[{}] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = NestedDataStructure(__UpperCamelCase ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" ,[ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], 
[4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] ,) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ): """simple docstring""" A_ = NestedDataStructure(__UpperCamelCase ).flatten() assert output == expected_output def __snake_case ( ): """simple docstring""" A_ = A(x=1 ,y="foobar" ) A_ = {"x": 1, "y": "foobar"} assert asdict(__UpperCamelCase ) == expected_output A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]} A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(__UpperCamelCase ) == expected_output with pytest.raises(__UpperCamelCase ): asdict([1, A(x=10 ,y="foo" )] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" return text.split() def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def __snake_case ( ): """simple docstring""" with Pool(2 ) as pool: A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(__UpperCamelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(__UpperCamelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: A_ = [] for yield_time, content in iflatmap_unordered( __UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(__UpperCamelCase ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(__UpperCamelCase ) == 4
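# Hedged usage sketch (not part of the test file): `map_nested` applies a function
# to every leaf of a nested dict/list, which is what the tests above exercise.
from datasets.utils.py_utils import map_nested

print(map_nested(str, {"a": [1, 2], "b": 3}))
# expected (per the behaviour tested above): {'a': ['1', '2'], 'b': '3'}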
329
1
import argparse import os import re import packaging.version __a :Union[str, Any] = 'examples/' __a :List[str] = { 'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'), 'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } __a :Optional[Any] = { 'init': 'src/transformers/__init__.py', 'setup': 'setup.py', } __a :str = 'README.md' def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : str ,__UpperCamelCase : Optional[int] ): """simple docstring""" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f: A_ = f.read() A_ , A_ = REPLACE_PATTERNS[pattern] A_ = replace.replace("VERSION" ,__UpperCamelCase ) A_ = re_pattern.sub(__UpperCamelCase ,__UpperCamelCase ) with open(__UpperCamelCase ,"w" ,encoding="utf-8" ,newline="\n" ) as f: f.write(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" for folder, directories, fnames in os.walk(__UpperCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("research_projects" ) if "legacy" in directories: directories.remove("legacy" ) for fname in fnames: if fname.endswith(".py" ): update_version_in_file(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase ,pattern="examples" ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : int=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) if not patch: update_version_in_examples(__UpperCamelCase ) def __snake_case ( ): """simple docstring""" A_ = "🤗 Transformers currently provides the following architectures" A_ = "1. Want to contribute a new model?" with open(__UpperCamelCase ,"r" ,encoding="utf-8" ,newline="\n" ) as f: A_ = f.readlines() # Find the start of the list. A_ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 A_ = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("1." ): A_ = lines[index].replace( "https://huggingface.co/docs/transformers/main/model_doc" ,"https://huggingface.co/docs/transformers/model_doc" ,) index += 1 with open(__UpperCamelCase ,"w" ,encoding="utf-8" ,newline="\n" ) as f: f.writelines(__UpperCamelCase ) def __snake_case ( ): """simple docstring""" with open(REPLACE_FILES["init"] ,"r" ) as f: A_ = f.read() A_ = REPLACE_PATTERNS["init"][0].search(__UpperCamelCase ).groups()[0] return packaging.version.parse(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Union[str, Any]=False ): """simple docstring""" A_ = get_version() if patch and default_version.is_devrelease: raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" ) if default_version.is_devrelease: A_ = default_version.base_version elif patch: A_ = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: A_ = f'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. A_ = input(f'''Which version are you releasing? 
[{default_version}]''' ) if len(__UpperCamelCase ) == 0: A_ = default_version print(f'''Updating version to {version}.''' ) global_version_update(__UpperCamelCase ,patch=__UpperCamelCase ) if not patch: print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() def __snake_case ( ): """simple docstring""" A_ = get_version() A_ = f'''{current_version.major}.{current_version.minor + 1}.0.dev0''' A_ = current_version.base_version # Check with the user we got that right. A_ = input(f'''Which version are we developing now? [{dev_version}]''' ) if len(__UpperCamelCase ) == 0: A_ = dev_version print(f'''Updating version to {version}.''' ) global_version_update(__UpperCamelCase ) print("Cleaning main README, don't forget to run `make fix-copies`." ) clean_main_ref_in_model_list() if __name__ == "__main__": __a :List[str] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') __a :str = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
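# Hedged sketch (not part of the original script): what a single REPLACE_PATTERNS
# entry does, applied to an in-memory string instead of a checked-out file.
import re

sample = '__version__ = "4.30.0.dev0"'
re_pattern, replace = REPLACE_PATTERNS["init"]
print(re_pattern.sub(replace.replace("VERSION", "4.30.0"), sample))
# __version__ = "4.30.0"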
329
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def __snake_case ( __UpperCamelCase : str ): """simple docstring""" for char in word: A_ = ord(__UpperCamelCase ) if not _is_chinese_char(__UpperCamelCase ): return 0 return 1 def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ = set() for token in tokens: A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase ) if chinese_word: word_set.add(__UpperCamelCase ) A_ = list(__UpperCamelCase ) return word_list def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] ) A_ = bert_tokens A_ , A_ = 0, len(__UpperCamelCase ) while start < end: A_ = True if is_chinese(bert_word[start] ): A_ = min(end - start ,__UpperCamelCase ) for i in range(__UpperCamelCase ,1 ,-1 ): A_ = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 ,start + i ): A_ = "##" + bert_word[j] A_ = start + i A_ = False break if single_word: start += 1 return bert_word def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ): """simple docstring""" A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0] A_ = [get_chinese_word(__UpperCamelCase ) for r in res] ltp_res.extend(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ): A_ = [] for id in input_ids: A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase ) input_tokens.append(__UpperCamelCase ) A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase ) A_ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__UpperCamelCase ): if token[:2] == "##": A_ = token[2:] # save chinese tokens' pos if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ): ref_id.append(__UpperCamelCase ) ref_ids.append(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) return ref_ids def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" with open(args.file_name ,"r" ,encoding="utf-8" ) as f: A_ = f.readlines() A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ = LTP(args.ltp ) # faster in GPU device A_ = BertTokenizer.from_pretrained(args.bert ) A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) with open(args.save_path ,"w" ,encoding="utf-8" ) as f: A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids] f.writelines(__UpperCamelCase ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path' ) parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer') parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res') __a :Dict = parser.parse_args() main(args)
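# Hedged sketch (not from the original file): the final step above records the
# positions of "##"-prefixed sub-tokens; the real code additionally checks that
# the stripped token is a single Chinese character.
def collect_ref_positions(tokens):
    return [i for i, token in enumerate(tokens) if token.startswith("##")]

print(collect_ref_positions(["[CLS]", "中", "##国", "人", "[SEP]"]))  # [2]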
329
1
import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __a :Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name def __snake_case ( __UpperCamelCase : Union[List, PIL.Image.Image, torch.Tensor] ): """simple docstring""" warnings.warn( "The preprocess method is deprecated and will be removed in a future version. Please" " use VaeImageProcessor.preprocess instead" ,__UpperCamelCase ,) if isinstance(__UpperCamelCase ,torch.Tensor ): return image elif isinstance(__UpperCamelCase ,PIL.Image.Image ): A_ = [image] if isinstance(image[0] ,PIL.Image.Image ): A_ , A_ = image[0].size A_ , A_ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 A_ = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] A_ = np.concatenate(__UpperCamelCase ,axis=0 ) A_ = np.array(__UpperCamelCase ).astype(np.floataa ) / 255.0 A_ = image.transpose(0 ,3 ,1 ,2 ) A_ = 2.0 * image - 1.0 A_ = torch.from_numpy(__UpperCamelCase ) elif isinstance(image[0] ,torch.Tensor ): A_ = torch.cat(__UpperCamelCase ,dim=0 ) return image def __snake_case ( __UpperCamelCase : Union[List, PIL.Image.Image, torch.Tensor] ): """simple docstring""" if isinstance(__UpperCamelCase ,torch.Tensor ): return mask elif isinstance(__UpperCamelCase ,PIL.Image.Image ): A_ = [mask] if isinstance(mask[0] ,PIL.Image.Image ): A_ , A_ = mask[0].size A_ , A_ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 A_ = [np.array(m.convert("L" ).resize((w, h) ,resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask] A_ = np.concatenate(__UpperCamelCase ,axis=0 ) A_ = mask.astype(np.floataa ) / 255.0 A_ = 0 A_ = 1 A_ = torch.from_numpy(__UpperCamelCase ) elif isinstance(mask[0] ,torch.Tensor ): A_ = torch.cat(__UpperCamelCase ,dim=0 ) return mask class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : UNetaDModel _lowerCamelCase : RePaintScheduler def __init__( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple ): super().__init__() self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase ) @torch.no_grad() def __call__( self : List[Any] , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] , UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] , UpperCAmelCase : int = 250 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : int = 10 , UpperCAmelCase : int = 10 , UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , ): A_ = image A_ = _preprocess_image(UpperCAmelCase ) A_ = original_image.to(device=self.device , dtype=self.unet.dtype ) A_ = _preprocess_mask(UpperCAmelCase ) A_ = mask_image.to(device=self.device , dtype=self.unet.dtype ) A_ = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(UpperCAmelCase )}, but requested an effective batch''' f''' size of {batch_size}. 
Make sure the batch size matches the length of the generators.''' ) A_ = original_image.shape A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , self.device ) A_ = eta A_ = self.scheduler.timesteps[0] + 1 A_ = generator[0] if isinstance(UpperCAmelCase , UpperCAmelCase ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual A_ = self.unet(UpperCAmelCase , UpperCAmelCase ).sample # compute previous image: x_t -> x_t-1 A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample else: # compute the reverse: x_t-1 -> x_t A_ = self.scheduler.undo_step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = t A_ = (image / 2 + 0.5).clamp(0 , 1 ) A_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A_ = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase )
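# Hedged usage sketch (not part of the pipeline file): a minimal RePaint call.
# The checkpoint id and parameter names follow the diffusers docs for this
# pipeline; the blank image and mask are placeholders for a real photo and
# inpainting mask (white = keep, black = repaint, by this reading of the mask
# convention).
from PIL import Image
from diffusers import RePaintPipeline, RePaintScheduler

original = Image.new("RGB", (256, 256), "gray")
mask = Image.new("RGB", (256, 256), "white")

scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
result = pipe(
    image=original,
    mask_image=mask,
    num_inference_steps=250,
    eta=0.0,
    jump_length=10,
    jump_n_sample=10,
)
result.images[0].save("inpainted.png")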
329
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def __snake_case ( __UpperCamelCase : Features ): """simple docstring""" A_ = np.inf def set_batch_size(__UpperCamelCase : FeatureType ) -> None: nonlocal batch_size if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary": A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__UpperCamelCase ,__UpperCamelCase ) return None if batch_size is np.inf else batch_size class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ): super().__init__( UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , ) A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths} A_ = _PACKAGED_DATASETS_MODULES["parquet"][1] A_ = Parquet( cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , ) def __A ( self : Optional[Any] ): # Build iterable dataset if self.streaming: A_ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A_ = None A_ = None A_ = None A_ = None self.builder.download_and_prepare( download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , ) A_ = self.builder.as_dataset( split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ): A_ = dataset A_ = path_or_buf A_ = batch_size or get_writer_batch_size(dataset.features ) A_ = parquet_writer_kwargs def __A ( self : int ): A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , "wb+" ) as buffer: A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs ) else: A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs ) return written def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ): A_ = 0 A_ = parquet_writer_kwargs.pop("path_or_buf" , 
UpperCAmelCase ) A_ = self.dataset.features.arrow_schema A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ): A_ = query_table( table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(UpperCAmelCase ) written += batch.nbytes writer.close() return written
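# Hedged usage sketch (not part of the original module): round-tripping a small
# Dataset through the reader/writer above via the public `datasets` API.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_parquet("tiny.parquet")                    # backed by the writer class above
reloaded = Dataset.from_parquet("tiny.parquet")  # backed by the reader class above
print(reloaded[0])  # {'text': 'a', 'label': 0}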
329
1
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: __a :Any = None __a :int = logging.get_logger(__name__) __a :Dict = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} __a :Dict = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } __a :Any = { 'facebook/nllb-large-en-ro': 1024, 'facebook/nllb-200-distilled-600M': 1024, } # fmt: off __a :Optional[Any] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = VOCAB_FILES_NAMES _lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES 
_lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : Dict = ['input_ids', 'attention_mask'] _lowerCamelCase : Any = NllbTokenizer _lowerCamelCase : List[int] = [] _lowerCamelCase : List[int] = [] def __init__( self : Tuple , UpperCAmelCase : Any=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : List[str]="</s>" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Tuple="<unk>" , UpperCAmelCase : Any="<pad>" , UpperCAmelCase : Dict="<mask>" , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]=False , **UpperCAmelCase : List[Any] , ): # Mask token behave like a normal word, i.e. include the space before it A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token A_ = legacy_behaviour super().__init__( vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , legacy_behaviour=UpperCAmelCase , **UpperCAmelCase , ) A_ = vocab_file A_ = False if not self.vocab_file else True A_ = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) A_ = { lang_code: self.convert_tokens_to_ids(UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } A_ = src_lang if src_lang is not None else "eng_Latn" A_ = self.convert_tokens_to_ids(self._src_lang ) A_ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __A ( self : int ): return self._src_lang @src_lang.setter def __A ( self : Tuple , UpperCAmelCase : str ): A_ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __A ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __A ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[str] , UpperCAmelCase : Optional[str] , **UpperCAmelCase : List[Any] ): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) A_ = src_lang A_ = self(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) A_ = self.convert_tokens_to_ids(UpperCAmelCase ) A_ = tgt_lang_id return inputs def __A ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : str = "eng_Latn" , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "fra_Latn" , 
**UpperCAmelCase : Optional[int] , ): A_ = src_lang A_ = tgt_lang return super().prepare_seqaseq_batch(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] ): return self.set_src_lang_special_tokens(self.src_lang ) def __A ( self : Tuple ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __A ( self : str , UpperCAmelCase : str ): A_ = self.convert_tokens_to_ids(UpperCAmelCase ) if self.legacy_behaviour: A_ = [] A_ = [self.eos_token_id, self.cur_lang_code] else: A_ = [self.cur_lang_code] A_ = [self.eos_token_id] A_ = self.convert_ids_to_tokens(self.prefix_tokens ) A_ = self.convert_ids_to_tokens(self.suffix_tokens ) A_ = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __A ( self : Optional[Any] , UpperCAmelCase : str ): A_ = self.convert_tokens_to_ids(UpperCAmelCase ) if self.legacy_behaviour: A_ = [] A_ = [self.eos_token_id, self.cur_lang_code] else: A_ = [self.cur_lang_code] A_ = [self.eos_token_id] A_ = self.convert_ids_to_tokens(self.prefix_tokens ) A_ = self.convert_ids_to_tokens(self.suffix_tokens ) A_ = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __A ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return A_ = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ): copyfile(self.vocab_file , UpperCAmelCase ) return (out_vocab_file,)
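A minimal usage sketch for the fast NLLB tokenizer above, assuming the facebook/nllb-200-distilled-600M checkpoint is reachable; `src_lang`/`tgt_lang` drive the language-code special tokens wired up in `set_src_lang_special_tokens`:

from transformers import AutoTokenizer

# assumption: the Hub checkpoint (or a local copy) is available
tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
encoded = tokenizer("Hello world")
# the source language code and </s> are attached according to legacy_behaviour
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))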
329
from __future__ import annotations def __snake_case ( __UpperCamelCase : int = 4 ): """simple docstring""" A_ = abs(__UpperCamelCase ) or 4 return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )] def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" return reverse_row(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_column(matrix)) def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" return reverse_row(reverse_column(__UpperCamelCase ) ) # OR.. reverse_column(reverse_row(matrix)) def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" return reverse_column(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_row(matrix)) def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" A_ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )] return matrix def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" A_ = matrix[::-1] return matrix def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" A_ = [x[::-1] for x in matrix] return matrix def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" for i in matrix: print(*__UpperCamelCase ) if __name__ == "__main__": __a :Any = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 90 counterclockwise:\n') print_matrix(rotate_aa(matrix)) __a :Any = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 180:\n') print_matrix(rotate_aaa(matrix)) __a :Any = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 270 counterclockwise:\n') print_matrix(rotate_aaa(matrix))
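Because every helper in the sample above was renamed to `__snake_case`, here is a self-contained sketch of the same rotation identities (90° counterclockwise = transpose then reverse the row order; 180° = reverse the row order then reverse each row), verified on a 2x2 matrix:

def transpose(m):
    return [list(col) for col in zip(*m)]

def rotate_90_ccw(m):
    # 90 degrees counterclockwise: transpose, then reverse the row order
    return transpose(m)[::-1]

def rotate_180(m):
    # 180 degrees: reverse the row order, then reverse each row
    return [row[::-1] for row in m[::-1]]

m = [[1, 2], [3, 4]]
assert rotate_90_ccw(m) == [[2, 4], [1, 3]]
assert rotate_180(m) == [[4, 3], [2, 1]]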
329
1
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : torch.FloatTensor _lowerCamelCase : Optional[torch.FloatTensor] = None def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCamelCase : Any ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCamelCase : int ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) A_ = [] for i in range(__UpperCamelCase ): A_ = i / num_diffusion_timesteps A_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) ) return torch.tensor(__UpperCamelCase ,dtype=torch.floataa ) class _a ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) A_ = betas_for_alpha_bar(UpperCAmelCase ) A_ = 1.0 - self.betas A_ = torch.cumprod(self.alphas , dim=0 ) A_ = torch.tensor(1.0 ) # standard deviation of the initial noise distribution A_ = 1.0 # setable values A_ = None A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() ) A_ = variance_type def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ): return sample def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ): A_ = num_inference_steps A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ): if prev_timestep is None: A_ = t - 1 A_ = self.alphas_cumprod[t] A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one A_ = 1 - alpha_prod_t A_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: A_ = self.betas[t] else: A_ = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample A_ = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: A_ = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) ) A_ = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler A_ = 
variance.log() A_ = beta.log() A_ = (predicted_variance + 1) / 2 A_ = frac * max_log + (1 - frac) * min_log return variance def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ): A_ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 ) else: A_ = None # 1. compute alphas, betas if prev_timestep is None: A_ = t - 1 A_ = self.alphas_cumprod[t] A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one A_ = 1 - alpha_prod_t A_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: A_ = self.betas[t] A_ = self.alphas[t] else: A_ = 1 - alpha_prod_t / alpha_prod_t_prev A_ = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": A_ = model_output else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: A_ = torch.clamp( UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise A_ = 0 if t > 0: A_ = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device ) A_ = self._get_variance( UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , ) if self.variance_type == "fixed_small_log": A_ = variance elif self.variance_type == "learned_range": A_ = (0.5 * variance).exp() else: raise ValueError( f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' " for the UnCLIPScheduler." ) A_ = variance * variance_noise A_ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) A_ = timesteps.to(original_samples.device ) A_ = alphas_cumprod[timesteps] ** 0.5 A_ = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): A_ = sqrt_alpha_prod.unsqueeze(-1 ) A_ = (1 - alphas_cumprod[timesteps]) ** 0.5 A_ = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
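The `squaredcos_cap_v2` schedule used above reduces to a few lines of plain Python; this standalone sketch mirrors the `betas_for_alpha_bar` cosine branch with the same 0.999 cap:

import math

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> list:
    # beta_i = 1 - alpha_bar((i + 1) / N) / alpha_bar(i / N), capped at max_beta
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]

betas = cosine_betas(1000)
print(betas[0], betas[-1])  # tiny at t=0, hits the cap at the final step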
329
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Any = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ): 
requires_backends(cls , ["torch", "transformers", "onnx"] )
329
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a :int = { 'configuration_instructblip': [ 'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InstructBlipConfig', 'InstructBlipQFormerConfig', 'InstructBlipVisionConfig', ], 'processing_instructblip': ['InstructBlipProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :List[str] = [ 'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'InstructBlipQFormerModel', 'InstructBlipPreTrainedModel', 'InstructBlipForConditionalGeneration', 'InstructBlipVisionModel', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys __a :List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
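The `_LazyModule` wiring above defers the heavy model imports until an attribute is first touched. A minimal self-contained sketch of that idea, with stdlib modules standing in for the real submodules:

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve exported names on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that actually defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value

lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"ok": True}))  # json is imported only here
print(lazy.sqrt(2.0))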
329
import itertools import math def __snake_case ( __UpperCamelCase : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __snake_case ( ): """simple docstring""" A_ = 2 while True: if is_prime(__UpperCamelCase ): yield num num += 1 def __snake_case ( __UpperCamelCase : int = 1_0001 ): """simple docstring""" return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) ) if __name__ == "__main__": print(F"{solution() = }")
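The 6k ± 1 trial division in the sample above is easy to sanity-check against naive trial division; a standalone sketch:

import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # every prime > 3 has the form 6k +/- 1, so only those divisors need testing
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

naive = lambda n: n > 1 and all(n % d for d in range(2, n))
assert all(is_prime(n) == naive(n) for n in range(2, 1000))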
329
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __a :Union[str, Any] = { 'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'], 'tokenization_biogpt': ['BioGptTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[int] = [ 'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BioGptForCausalLM', 'BioGptForTokenClassification', 'BioGptForSequenceClassification', 'BioGptModel', 'BioGptPreTrainedModel', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
329
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _a : """simple docstring""" def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ): A_ = parent A_ = 13 A_ = 7 A_ = True A_ = True A_ = True A_ = True A_ = 99 A_ = 384 A_ = 2 A_ = 4 A_ = 37 A_ = "gelu" A_ = 0.1 A_ = 0.1 A_ = 512 A_ = 16 A_ = 2 A_ = 0.02 A_ = 3 A_ = 4 A_ = 128 A_ = 2 A_ = 9 A_ = 1 A_ = None def __A ( self : Optional[int] ): A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_input_mask: A_ = random_attention_mask([self.batch_size, self.seq_length] ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = ids_tensor([self.batch_size] , self.num_choices ) A_ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ): A_ = TFConvBertModel(config=UpperCAmelCase ) A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ = [input_ids, input_mask] A_ = model(UpperCAmelCase ) A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : 
Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ): A_ = TFConvBertForMaskedLM(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ): A_ = self.num_labels A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ): A_ = self.num_choices A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ): A_ = self.num_labels A_ = TFConvBertForTokenClassification(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ): A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : List[str] ): A_ = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = config_and_inputs A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) _lowerCamelCase : Any = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': 
TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase : Dict = False _lowerCamelCase : Optional[int] = False _lowerCamelCase : Dict = False def __A ( self : List[str] ): A_ = TFConvBertModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def __A ( self : Tuple ): self.config_tester.run_common_tests() def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase ) def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase ) @slow def __A ( self : str ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True A_ = True if hasattr(UpperCAmelCase , "use_cache" ): A_ = True A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) for model_class in self.all_model_classes: A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) A_ = model_class(UpperCAmelCase ) A_ = len(model(UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase ) A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" ) A_ = tf.keras.models.load_model(UpperCAmelCase ) A_ = model(UpperCAmelCase ) if self.is_encoder_decoder: A_ = outputs["encoder_hidden_states"] A_ = outputs["encoder_attentions"] else: A_ = outputs["hidden_states"] A_ = outputs["attentions"] self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) A_ = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __A ( self : List[str] ): A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(UpperCAmelCase ) def __A ( self : Any ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ): A_ = len(UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) A_ = 
outputs.decoder_attentions self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ): A_ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: A_ = True A_ = False A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) A_ = len(UpperCAmelCase ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) if self.is_encoder_decoder: A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_decoder_attentions_output(UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) # Check attention is always last and order is fine A_ = True A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) @require_tf class _a ( unittest.TestCase ): """simple docstring""" @slow def __A ( self : Dict ): A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) A_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ = model(UpperCAmelCase )[0] A_ = [1, 6, 768] self.assertEqual(output.shape , UpperCAmelCase ) A_ = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
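The integration test above pins the expected logit slice; the same checkpoint can be exercised directly, assuming TensorFlow is installed and the YituTech/conv-bert-base weights are reachable:

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
hidden = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
print(hidden.shape)  # (1, 6, 768), matching the integration test above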
329
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __a :Tuple = { 'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Tuple = ['MobileViTFeatureExtractor'] __a :int = ['MobileViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[Any] = [ 'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileViTForImageClassification', 'MobileViTForSemanticSegmentation', 'MobileViTModel', 'MobileViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :int = [ 'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileViTForImageClassification', 'TFMobileViTForSemanticSegmentation', 'TFMobileViTModel', 'TFMobileViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __a :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
329
from ...configuration_utils import PretrainedConfig from ...utils import logging __a :Dict = logging.get_logger(__name__) __a :int = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = 'realm' def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ): super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase ) # Common config A_ = vocab_size A_ = max_position_embeddings A_ = hidden_size A_ = retriever_proj_size A_ = num_hidden_layers A_ = num_attention_heads A_ = num_candidates A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = initializer_range A_ = type_vocab_size A_ = layer_norm_eps # Reader config A_ = span_hidden_size A_ = max_span_width A_ = reader_layer_norm_eps A_ = reader_beam_size A_ = reader_seq_len # Retrieval config A_ = num_block_records A_ = searcher_beam_size
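Assuming the public RealmConfig class corresponds to the definition above, a quick instantiation sketch showing the retriever and reader knobs:

from transformers import RealmConfig

config = RealmConfig(num_candidates=4, reader_beam_size=3)
print(config.hidden_size)        # 768 by default
print(config.num_candidates)     # 4, as overridden above
print(config.num_block_records)  # 13353718 retrieval blocks by default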
329
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __a :Optional[Any] = logging.get_logger(__name__) __a :str = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'} class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[int] = 'openai-gpt' _lowerCamelCase : Optional[Any] = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : List[Any] , UpperCAmelCase : Union[str, Any]=40478 , UpperCAmelCase : str=512 , UpperCAmelCase : List[Any]=768 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : List[Any]=12 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : List[Any]=1E-5 , UpperCAmelCase : Optional[int]=0.02 , UpperCAmelCase : Dict="cls_index" , UpperCAmelCase : Dict=True , UpperCAmelCase : Dict=None , UpperCAmelCase : int=True , UpperCAmelCase : Union[str, Any]=0.1 , **UpperCAmelCase : str , ): A_ = vocab_size A_ = n_positions A_ = n_embd A_ = n_layer A_ = n_head A_ = afn A_ = resid_pdrop A_ = embd_pdrop A_ = attn_pdrop A_ = layer_norm_epsilon A_ = initializer_range A_ = summary_type A_ = summary_use_proj A_ = summary_activation A_ = summary_first_dropout A_ = summary_proj_to_labels super().__init__(**UpperCAmelCase )
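The attribute map above aliases the canonical config names onto the GPT-specific ones, so `hidden_size` and `n_embd` refer to the same value; a quick sketch:

from transformers import OpenAIGPTConfig

config = OpenAIGPTConfig(n_embd=256, n_layer=6)
# the attribute map routes the canonical names to the GPT-specific fields
assert config.hidden_size == config.n_embd == 256
assert config.num_hidden_layers == config.n_layer == 6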
329
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a :Optional[Any] = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = original_name.split("." )[0] A_ = key.split("." ) A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] ) A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] ) A_ = orig_block_num - offset A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" A_ = OrderedDict() A_ , A_ = 0, 0 for key, value in state_dict.items(): if key.startswith("network" ): A_ = key.replace("network" ,"poolformer.encoder" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("bias" ) and "patch_embed" not in key: patch_emb_offset += 1 A_ = key[: key.find("proj" )] A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' ) A_ = key.replace("proj" ,"projection" ) if key.endswith("bias" ): total_embed_found += 1 if "patch_embeddings" in key: A_ = "poolformer.encoder." + key if "mlp.fc1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" ) if "mlp.fc2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" ) if "norm1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" ) if "norm2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" ) if "layer_scale_1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" ) if "layer_scale_2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" ) if "head" in key: A_ = key.replace("head" ,"classifier" ) A_ = value return new_state_dict def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return image @torch.no_grad() def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ): """simple docstring""" A_ = PoolFormerConfig() # set attributes based on model_name A_ = "huggingface/label-files" A_ = model_name[-3:] A_ = 1000 A_ = "imagenet-1k-id2label.json" A_ = (1, 1000) # set config attributes A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} if size == "s12": A_ = [2, 2, 6, 2] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 0.9 elif size == "s24": A_ = [4, 4, 12, 4] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 0.9 elif size == "s36": A_ = [6, 6, 18, 6] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 1E-6 A_ = 0.9 elif size == "m36": A_ = [6, 6, 18, 6] A_ = [96, 192, 384, 768] A_ = 4.0 A_ = 1E-6 A_ = 0.95 elif size == "m48": A_ = [8, 8, 24, 8] A_ = [96, 192, 384, 768] A_ = 4.0 A_ = 1E-6 A_ = 
0.95 else: raise ValueError(f'''Size {size} not supported''' ) # load image processor A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase ) # Prepare image A_ = prepare_img() A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values logger.info(f'''Converting model {model_name}...''' ) # load original state dict A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) ) # rename keys A_ = rename_keys(__UpperCamelCase ) # create HuggingFace model and load state dict A_ = PoolFormerForImageClassification(__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) model.eval() # Define image processor A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase ) A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values # forward pass A_ = model(__UpperCamelCase ) A_ = outputs.logits # define expected logit slices for different models if size == "s12": A_ = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": A_ = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": A_ = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": A_ = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": A_ = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(f'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 ) # finally, save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __a :int = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
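For reference, the if/elif chain above amounts to the lookup table sketched below (layer_scale 1e-6 additionally applies to s36/m36/m48). A hypothetical invocation, with placeholder paths, would look like `python convert_poolformer_checkpoint.py --model_name poolformer_s24 --checkpoint_path /path/to/poolformer_s24.pth --pytorch_dump_folder_path ./poolformer-s24`.

# per-size hyperparameters, consolidated from the branch logic above
POOLFORMER_SIZES = {
    "s12": dict(depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], crop_pct=0.9),
    "s24": dict(depths=[4, 4, 12, 4], hidden_sizes=[64, 128, 320, 512], crop_pct=0.9),
    "s36": dict(depths=[6, 6, 18, 6], hidden_sizes=[64, 128, 320, 512], crop_pct=0.9),
    "m36": dict(depths=[6, 6, 18, 6], hidden_sizes=[96, 192, 384, 768], crop_pct=0.95),
    "m48": dict(depths=[8, 8, 24, 8], hidden_sizes=[96, 192, 384, 768], crop_pct=0.95),
}
print(POOLFORMER_SIZES["poolformer_s24"[-3:]])  # the size key is the name's last 3 chars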
329
1
from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def __snake_case ( __UpperCamelCase : int ): """simple docstring""" def is_in_circle(__UpperCamelCase : float ,__UpperCamelCase : float ) -> bool: A_ = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle A_ = mean( int(is_in_circle(uniform(-1.0 ,1.0 ) ,uniform(-1.0 ,1.0 ) ) ) for _ in range(__UpperCamelCase ) ) # The ratio of the area for circle to square is pi/4. A_ = proportion * 4 print(f'''The estimated value of pi is {pi_estimate}''' ) print(f'''The numpy value of pi is {pi}''' ) print(f'''The total error is {abs(pi - pi_estimate )}''' ) def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Callable[[float], float] ,__UpperCamelCase : float = 0.0 ,__UpperCamelCase : float = 1.0 ,): """simple docstring""" return mean( function_to_integrate(uniform(__UpperCamelCase ,__UpperCamelCase ) ) for _ in range(__UpperCamelCase ) ) * (max_value - min_value) def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : float = 0.0 ,__UpperCamelCase : float = 1.0 ): """simple docstring""" def identity_function(__UpperCamelCase : float ) -> float: return x A_ = area_under_curve_estimator( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) A_ = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(f'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {expected_value}''' ) print(f'''Total error is {abs(estimated_value - expected_value )}''' ) print("******************" ) def __snake_case ( __UpperCamelCase : int ): """simple docstring""" def function_to_integrate(__UpperCamelCase : float ) -> float: return sqrt(4.0 - x * x ) A_ = area_under_curve_estimator( __UpperCamelCase ,__UpperCamelCase ,0.0 ,2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(f'''Estimated value is {estimated_value}''' ) print(f'''Expected value is {pi}''' ) print(f'''Total error is {abs(estimated_value - pi )}''' ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
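Since the estimator names above were mangled, here is a self-contained sketch of the same Monte Carlo estimate of pi:

from random import uniform
from statistics import mean

def estimate_pi(iterations: int = 100_000) -> float:
    # fraction of uniform points in [-1, 1]^2 landing inside the unit circle,
    # times 4 (the circle/square area ratio is pi/4)
    hits = mean(
        int(uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2 <= 1.0)
        for _ in range(iterations)
    )
    return 4 * hits

print(estimate_pi())  # ~3.14, with Monte Carlo noise of order 1/sqrt(iterations)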
329
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : torch.FloatTensor _lowerCamelCase : Optional[torch.FloatTensor] = None def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCamelCase : Any ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCamelCase : int ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) A_ = [] for i in range(__UpperCamelCase ): A_ = i / num_diffusion_timesteps A_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) ) return torch.tensor(__UpperCamelCase ,dtype=torch.floataa ) class _a ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) A_ = betas_for_alpha_bar(UpperCAmelCase ) A_ = 1.0 - self.betas A_ = torch.cumprod(self.alphas , dim=0 ) A_ = torch.tensor(1.0 ) # standard deviation of the initial noise distribution A_ = 1.0 # setable values A_ = None A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() ) A_ = variance_type def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ): return sample def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ): A_ = num_inference_steps A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ): if prev_timestep is None: A_ = t - 1 A_ = self.alphas_cumprod[t] A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one A_ = 1 - alpha_prod_t A_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: A_ = self.betas[t] else: A_ = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample A_ = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: A_ = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) ) A_ = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler A_ = 
variance.log() A_ = beta.log() A_ = (predicted_variance + 1) / 2 A_ = frac * max_log + (1 - frac) * min_log return variance def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ): A_ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 ) else: A_ = None # 1. compute alphas, betas if prev_timestep is None: A_ = t - 1 A_ = self.alphas_cumprod[t] A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one A_ = 1 - alpha_prod_t A_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: A_ = self.betas[t] A_ = self.alphas[t] else: A_ = 1 - alpha_prod_t / alpha_prod_t_prev A_ = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": A_ = model_output else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: A_ = torch.clamp( UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise A_ = 0 if t > 0: A_ = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device ) A_ = self._get_variance( UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , ) if self.variance_type == "fixed_small_log": A_ = variance elif self.variance_type == "learned_range": A_ = (0.5 * variance).exp() else: raise ValueError( f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' " for the UnCLIPScheduler." ) A_ = variance * variance_noise A_ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) A_ = timesteps.to(original_samples.device ) A_ = alphas_cumprod[timesteps] ** 0.5 A_ = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): A_ = sqrt_alpha_prod.unsqueeze(-1 ) A_ = (1 - alphas_cumprod[timesteps]) ** 0.5 A_ = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
329
1
from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class _a ( nn.Module ): """simple docstring""" def __init__( self : Dict , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : List[str]=0.0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : str = "geglu" , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : str = "layer_norm" , UpperCAmelCase : bool = False , ): super().__init__() A_ = only_cross_attention A_ = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" A_ = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: A_ = AdaLayerNorm(UpperCAmelCase , UpperCAmelCase ) elif self.use_ada_layer_norm_zero: A_ = AdaLayerNormZero(UpperCAmelCase , UpperCAmelCase ) else: A_ = nn.LayerNorm(UpperCAmelCase , elementwise_affine=UpperCAmelCase ) A_ = Attention( query_dim=UpperCAmelCase , heads=UpperCAmelCase , dim_head=UpperCAmelCase , dropout=UpperCAmelCase , bias=UpperCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCAmelCase , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. A_ = ( AdaLayerNorm(UpperCAmelCase , UpperCAmelCase ) if self.use_ada_layer_norm else nn.LayerNorm(UpperCAmelCase , elementwise_affine=UpperCAmelCase ) ) A_ = Attention( query_dim=UpperCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCAmelCase , dim_head=UpperCAmelCase , dropout=UpperCAmelCase , bias=UpperCAmelCase , upcast_attention=UpperCAmelCase , ) # is self-attn if encoder_hidden_states is none else: A_ = None A_ = None # 3. Feed-forward A_ = nn.LayerNorm(UpperCAmelCase , elementwise_affine=UpperCAmelCase ) A_ = FeedForward(UpperCAmelCase , dropout=UpperCAmelCase , activation_fn=UpperCAmelCase , final_dropout=UpperCAmelCase ) # let chunk size default to None A_ = None A_ = 0 def __A ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ): # Sets chunk feed-forward A_ = chunk_size A_ = dim def __A ( self : Any , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[torch.LongTensor] = None , UpperCAmelCase : Dict[str, Any] = None , UpperCAmelCase : Optional[torch.LongTensor] = None , ): # Notice that normalization is always applied before the real computation in the following blocks. # 1. 
Self-Attention if self.use_ada_layer_norm: A_ = self.norma(UpperCAmelCase , UpperCAmelCase ) elif self.use_ada_layer_norm_zero: A_ , A_ , A_ , A_ , A_ = self.norma( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hidden_dtype=hidden_states.dtype ) else: A_ = self.norma(UpperCAmelCase ) A_ = cross_attention_kwargs if cross_attention_kwargs is not None else {} A_ = self.attna( UpperCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCAmelCase , **UpperCAmelCase , ) if self.use_ada_layer_norm_zero: A_ = gate_msa.unsqueeze(1 ) * attn_output A_ = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: A_ = ( self.norma(UpperCAmelCase , UpperCAmelCase ) if self.use_ada_layer_norm else self.norma(UpperCAmelCase ) ) A_ = self.attna( UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase , ) A_ = attn_output + hidden_states # 3. Feed-forward A_ = self.norma(UpperCAmelCase ) if self.use_ada_layer_norm_zero: A_ = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) A_ = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size A_ = torch.cat( [self.ff(UpperCAmelCase ) for hid_slice in norm_hidden_states.chunk(UpperCAmelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: A_ = self.ff(UpperCAmelCase ) if self.use_ada_layer_norm_zero: A_ = gate_mlp.unsqueeze(1 ) * ff_output A_ = ff_output + hidden_states return hidden_states class _a ( nn.Module ): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 4 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : str = "geglu" , UpperCAmelCase : bool = False , ): super().__init__() A_ = int(dim * mult ) A_ = dim_out if dim_out is not None else dim if activation_fn == "gelu": A_ = GELU(UpperCAmelCase , UpperCAmelCase ) if activation_fn == "gelu-approximate": A_ = GELU(UpperCAmelCase , UpperCAmelCase , approximate="tanh" ) elif activation_fn == "geglu": A_ = GEGLU(UpperCAmelCase , UpperCAmelCase ) elif activation_fn == "geglu-approximate": A_ = ApproximateGELU(UpperCAmelCase , UpperCAmelCase ) A_ = nn.ModuleList([] ) # project in self.net.append(UpperCAmelCase ) # project dropout self.net.append(nn.Dropout(UpperCAmelCase ) ) # project out self.net.append(nn.Linear(UpperCAmelCase , UpperCAmelCase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. 
have a final dropout if final_dropout: self.net.append(nn.Dropout(UpperCAmelCase ) ) def __A ( self : Dict , UpperCAmelCase : int ): for module in self.net: A_ = module(UpperCAmelCase ) return hidden_states class _a ( nn.Module ): """simple docstring""" def __init__( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : str = "none" ): super().__init__() A_ = nn.Linear(UpperCAmelCase , UpperCAmelCase ) A_ = approximate def __A ( self : str , UpperCAmelCase : Dict ): if gate.device.type != "mps": return F.gelu(UpperCAmelCase , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def __A ( self : List[Any] , UpperCAmelCase : Any ): A_ = self.proj(UpperCAmelCase ) A_ = self.gelu(UpperCAmelCase ) return hidden_states class _a ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : int ): super().__init__() A_ = nn.Linear(UpperCAmelCase , dim_out * 2 ) def __A ( self : Tuple , UpperCAmelCase : Tuple ): if gate.device.type != "mps": return F.gelu(UpperCAmelCase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def __A ( self : int , UpperCAmelCase : str ): A_ , A_ = self.proj(UpperCAmelCase ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(UpperCAmelCase ) class _a ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : int ): super().__init__() A_ = nn.Linear(UpperCAmelCase , UpperCAmelCase ) def __A ( self : int , UpperCAmelCase : int ): A_ = self.proj(UpperCAmelCase ) return x * torch.sigmoid(1.702 * x ) class _a ( nn.Module ): """simple docstring""" def __init__( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : int ): super().__init__() A_ = nn.Embedding(UpperCAmelCase , UpperCAmelCase ) A_ = nn.SiLU() A_ = nn.Linear(UpperCAmelCase , embedding_dim * 2 ) A_ = nn.LayerNorm(UpperCAmelCase , elementwise_affine=UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any ): A_ = self.linear(self.silu(self.emb(UpperCAmelCase ) ) ) A_ , A_ = torch.chunk(UpperCAmelCase , 2 ) A_ = self.norm(UpperCAmelCase ) * (1 + scale) + shift return x class _a ( nn.Module ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] ): super().__init__() A_ = CombinedTimestepLabelEmbeddings(UpperCAmelCase , UpperCAmelCase ) A_ = nn.SiLU() A_ = nn.Linear(UpperCAmelCase , 6 * embedding_dim , bias=UpperCAmelCase ) A_ = nn.LayerNorm(UpperCAmelCase , elementwise_affine=UpperCAmelCase , eps=1E-6 ) def __A ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int]=None ): A_ = self.linear(self.silu(self.emb(UpperCAmelCase , UpperCAmelCase , hidden_dtype=UpperCAmelCase ) ) ) A_ , A_ , A_ , A_ , A_ , A_ = emb.chunk(6 , dim=1 ) A_ = self.norm(UpperCAmelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class _a ( nn.Module ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : float = 1E-5 ): super().__init__() A_ = num_groups A_ = eps if act_fn is None: A_ = None else: A_ = get_activation(UpperCAmelCase ) A_ = nn.Linear(UpperCAmelCase , out_dim * 2 ) def __A ( self : 
Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ): if self.act: A_ = self.act(UpperCAmelCase ) A_ = self.linear(UpperCAmelCase ) A_ = emb[:, :, None, None] A_ , A_ = emb.chunk(2 , dim=1 ) A_ = F.group_norm(UpperCAmelCase , self.num_groups , eps=self.eps ) A_ = x * (1 + scale) + shift return x
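# Editorial sketch, not part of the diffusers source: the chunked feed-forward
# trick used in the transformer block above is exact because the MLP acts
# position-wise. Splitting the sequence, running the MLP per slice, and
# concatenating gives the same result as one full pass while bounding peak
# activation memory. `small_ff` is a made-up stand-in for the FeedForward
# module; `torch` and `nn` are already imported at the top of this file.
if __name__ == "__main__":
    small_ff = nn.Sequential(nn.Linear(16, 64), nn.GELU(), nn.Linear(64, 16))
    hidden = torch.randn(2, 8, 16)  # (batch, sequence, dim)

    chunk_dim, chunk_size = 1, 4
    num_chunks = hidden.shape[chunk_dim] // chunk_size
    chunked = torch.cat(
        [small_ff(hid_slice) for hid_slice in hidden.chunk(num_chunks, dim=chunk_dim)],
        dim=chunk_dim,
    )
    assert torch.allclose(chunked, small_ff(hidden), atol=1e-6)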
329
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return every prime below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid-integers p**q * q**p (p < q prime) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
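# Editorial sketch, not part of the original solution: `brute_force` is a
# hypothetical helper that enumerates prime pairs directly. It is only
# practical for tiny bounds, but it gives a cheap cross-check of the
# two-pointer count above.
def brute_force(base: int, degree: int) -> int:
    bound = base**degree
    primes = calculate_prime_numbers(int(degree * log2(base)) + 1)
    return sum(
        1
        for i, p in enumerate(primes)
        for q in primes[i + 1 :]
        if p**q * q**p <= bound
    )


# The only hybrid-integers <= 1000 are 2**3 * 3**2 = 72 and 2**5 * 5**2 = 800.
assert brute_force(1000, 1) == solution(1000, 1) == 2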
329
1
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() __a :Any = logging.get_logger(__name__) __a :Dict = 'https://openaipublic.azureedge.net/jukebox/models/' __a :List[str] = { 'jukebox-1b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '1b_lyrics/prior_level_2.pth.tar', ], 'jukebox-5b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '5b_lyrics/prior_level_2.pth.tar', ], } def __snake_case ( __UpperCamelCase : str ): """simple docstring""" if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10: A_ = key.replace(".model.1.bias" ,".conv1d_1.bias" ) elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10: A_ = key.replace(".model.1.weight" ,".conv1d_1.weight" ) elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10: A_ = key.replace(".model.3.bias" ,".conv1d_2.bias" ) elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10: A_ = key.replace(".model.3.weight" ,".conv1d_2.weight" ) if "conditioner_blocks.0." in key: A_ = key.replace("conditioner_blocks.0" ,"conditioner_blocks" ) if "prime_prior" in key: A_ = key.replace("prime_prior" ,"encoder" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: A_ = key.replace(".emb." ,"." ) if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(".k" ,".codebook" ) if "y_emb." in key: return key.replace("y_emb." ,"metadata_embedding." ) if "x_emb.emb." in key: A_ = key.replace("0.x_emb.emb" ,"embed_tokens" ) if "prime_state_ln" in key: return key.replace("prime_state_ln" ,"encoder.final_layer_norm" ) if ".ln" in key: return key.replace(".ln" ,".layer_norm" ) if "_ln" in key: return key.replace("_ln" ,"_layer_norm" ) if "prime_state_proj" in key: return key.replace("prime_state_proj" ,"encoder.proj_in" ) if "prime_x_out" in key: return key.replace("prime_x_out" ,"encoder.lm_head" ) if "prior.x_out" in key: return key.replace("x_out" ,"fc_proj_out" ) if "x_emb" in key: return key.replace("x_emb" ,"embed_tokens" ) return key def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Dict ,__UpperCamelCase : Dict ): """simple docstring""" A_ = {} import re A_ = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) A_ = re.compile( R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) A_ = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) A_ = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) A_ = re.compile( R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) A_ = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) A_ = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" ) A_ = re.compile( R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) A_ = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(__UpperCamelCase ): A_ = re_encoder_block_conv_in.match(__UpperCamelCase ) A_ = regex_match.groups() A_ = int(groups[2] ) * 2 + int(groups[3] ) A_ = 
f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' A_ = re_encoder_block_conv_in.sub(__UpperCamelCase ,__UpperCamelCase ) elif re_encoder_block_resnet.fullmatch(__UpperCamelCase ): A_ = re_encoder_block_resnet.match(__UpperCamelCase ) A_ = regex_match.groups() A_ = int(groups[2] ) * 2 + int(groups[3] ) A_ = {"1": 1, "3": 2}[groups[-2]] A_ = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' A_ = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' A_ = prefix + resnet_block A_ = re_encoder_block_resnet.sub(__UpperCamelCase ,__UpperCamelCase ) elif re_encoder_block_proj_out.fullmatch(__UpperCamelCase ): A_ = re_encoder_block_proj_out.match(__UpperCamelCase ) A_ = regex_match.groups() A_ = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' A_ = re_encoder_block_proj_out.sub(__UpperCamelCase ,__UpperCamelCase ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(__UpperCamelCase ): A_ = re_decoder_block_conv_out.match(__UpperCamelCase ) A_ = regex_match.groups() A_ = int(groups[2] ) * 2 + int(groups[3] ) - 2 A_ = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' A_ = re_decoder_block_conv_out.sub(__UpperCamelCase ,__UpperCamelCase ) elif re_decoder_block_resnet.fullmatch(__UpperCamelCase ): A_ = re_decoder_block_resnet.match(__UpperCamelCase ) A_ = regex_match.groups() A_ = int(groups[2] ) * 2 + int(groups[3] ) - 2 A_ = {"1": 1, "3": 2}[groups[-2]] A_ = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' A_ = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' A_ = prefix + resnet_block A_ = re_decoder_block_resnet.sub(__UpperCamelCase ,__UpperCamelCase ) elif re_decoder_block_proj_in.fullmatch(__UpperCamelCase ): A_ = re_decoder_block_proj_in.match(__UpperCamelCase ) A_ = regex_match.groups() A_ = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' A_ = re_decoder_block_proj_in.sub(__UpperCamelCase ,__UpperCamelCase ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(__UpperCamelCase ): A_ = re_prior_cond_conv_out.match(__UpperCamelCase ) A_ = regex_match.groups() A_ = int(groups[1] ) * 2 + int(groups[2] ) - 2 A_ = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' A_ = re_prior_cond_conv_out.sub(__UpperCamelCase ,__UpperCamelCase ) elif re_prior_cond_resnet.fullmatch(__UpperCamelCase ): A_ = re_prior_cond_resnet.match(__UpperCamelCase ) A_ = regex_match.groups() A_ = int(groups[1] ) * 2 + int(groups[2] ) - 2 A_ = {"1": 1, "3": 2}[groups[-2]] A_ = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' A_ = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' A_ = prefix + resnet_block A_ = re_prior_cond_resnet.sub(__UpperCamelCase ,__UpperCamelCase ) elif re_prior_cond_proj_in.fullmatch(__UpperCamelCase ): A_ = re_prior_cond_proj_in.match(__UpperCamelCase ) A_ = regex_match.groups() A_ = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' A_ = re_prior_cond_proj_in.sub(__UpperCamelCase ,__UpperCamelCase ) # keep original key else: A_ = original_key A_ = replace_key(__UpperCamelCase ) if f'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(f'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape: A_ = 
model_state_dict[f'''{key_prefix}.{key}'''] print(f'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) A_ = original_key A_ = original_key A_ = value return new_dict @torch.no_grad() def __snake_case ( __UpperCamelCase : str=None ,__UpperCamelCase : int=None ): """simple docstring""" for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ): A_ = requests.get(f'''{PREFIX}{file}''' ,allow_redirects=__UpperCamelCase ) os.makedirs(f'''{pytorch_dump_folder_path}/''' ,exist_ok=__UpperCamelCase ) open(f'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ,"wb" ).write(r.content ) A_ = MODEL_MAPPING[model_name.split("/" )[-1]] A_ = JukeboxConfig.from_pretrained(__UpperCamelCase ) A_ = JukeboxModel(__UpperCamelCase ) A_ = [] A_ = {} for i, dict_name in enumerate(__UpperCamelCase ): A_ = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )["model"] A_ = {} for k in old_dic.keys(): if k.endswith(".b" ): A_ = old_dic[k] elif k.endswith(".w" ): A_ = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: A_ = old_dic[k] else: A_ = old_dic[k] A_ = "vqvae" if i == 0 else f'''priors.{3 - i}''' A_ = fix_jukebox_keys(__UpperCamelCase ,model.state_dict() ,__UpperCamelCase ,__UpperCamelCase ) weight_dict.append(__UpperCamelCase ) A_ = weight_dict.pop(0 ) model.vqvae.load_state_dict(__UpperCamelCase ) for i in range(len(__UpperCamelCase ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) with open(f'''{pytorch_dump_folder_path}/mapping.json''' ,"w" ) as txtfile: json.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__UpperCamelCase ) return weight_dict if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='jukebox-5b-lyrics', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='jukebox-5b-lyrics-converted', type=str, help='Path to the output PyTorch model directory.', ) __a :Tuple = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
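# Editorial self-check, independent of any checkpoint: the converter above
# works by full-matching a regex against an original key and reassembling a
# new key from the captured groups. The pattern and key here are simplified
# stand-ins, not the converter's real patterns.
import re

_demo_pattern = re.compile(r"encoders\.(\d+)\.model\.(\d+)\.(bias|weight)")
_match = _demo_pattern.fullmatch("encoders.0.model.3.weight")
assert _match is not None
_level, _layer, _kind = _match.groups()
assert f"encoders.{_level}.blocks.{int(_layer) // 2}.{_kind}" == "encoders.0.blocks.1.weight"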
329
import argparse

import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the original checkpoint into the RoBERTa-PreLayerNorm structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
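# Editorial mini-demo of the prefix rewrite above on a made-up key; nothing
# is downloaded and the key does not need to exist in any real checkpoint.
demo_key = "roberta.encoder.layer.0.output.dense.weight"
renamed = "roberta_prelayernorm." + demo_key[len("roberta.") :]
assert renamed == "roberta_prelayernorm.encoder.layer.0.output.dense.weight"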
329
1
import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('.') def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" A_ = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " f'''{test_file} instead.''' ) A_ = components[-1] if not test_fn.endswith("py" ): raise ValueError(f'''`test_file` should be a python file. Got {test_fn} instead.''' ) if not test_fn.startswith("test_modeling_" ): raise ValueError( f'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' ) A_ = components[:-1] + [test_fn.replace(".py" ,"" )] A_ = ".".join(__UpperCamelCase ) return test_module_path def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" A_ = get_module_path(__UpperCamelCase ) A_ = importlib.import_module(__UpperCamelCase ) return test_module def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" A_ = [] A_ = get_test_module(__UpperCamelCase ) for attr in dir(__UpperCamelCase ): if attr.endswith("ModelTester" ): tester_classes.append(getattr(__UpperCamelCase ,__UpperCamelCase ) ) # sort with class names return sorted(__UpperCamelCase ,key=lambda __UpperCamelCase : x.__name__ ) def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = [] A_ = get_test_module(__UpperCamelCase ) for attr in dir(__UpperCamelCase ): A_ = getattr(__UpperCamelCase ,__UpperCamelCase ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). A_ = getattr(__UpperCamelCase ,"all_model_classes" ,[] ) if len(__UpperCamelCase ) > 0: test_classes.append(__UpperCamelCase ) # sort with class names return sorted(__UpperCamelCase ,key=lambda __UpperCamelCase : x.__name__ ) def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = get_test_classes(__UpperCamelCase ) A_ = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(__UpperCamelCase ,key=lambda __UpperCamelCase : x.__name__ ) def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ = test_class() if hasattr(__UpperCamelCase ,"setUp" ): test.setUp() A_ = None if hasattr(__UpperCamelCase ,"model_tester" ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: A_ = test.model_tester.__class__ return model_tester def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = get_test_classes(__UpperCamelCase ) A_ = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(__UpperCamelCase ) # sort with class names return sorted(__UpperCamelCase ,key=lambda __UpperCamelCase : x.__name__ ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Dict ): """simple docstring""" A_ = get_test_classes_for_model(__UpperCamelCase ,__UpperCamelCase ) A_ = [] for test_class in test_classes: A_ = get_model_tester_from_test_class(__UpperCamelCase ) if tester_class is not None: tester_classes.append(__UpperCamelCase ) # sort with class names return sorted(__UpperCamelCase ,key=lambda __UpperCamelCase : x.__name__ ) def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" A_ = get_test_classes(__UpperCamelCase ) A_ = {test_class: get_model_tester_from_test_class(__UpperCamelCase ) for test_class in test_classes} return test_tester_mapping def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = get_model_classes(__UpperCamelCase ) A_ = { model_class: get_test_classes_for_model(__UpperCamelCase ,__UpperCamelCase ) for model_class in model_classes } return model_test_mapping def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = get_model_classes(__UpperCamelCase ) A_ = { model_class: get_tester_classes_for_model(__UpperCamelCase ,__UpperCamelCase ) for model_class in model_classes } return model_to_tester_mapping def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" if isinstance(__UpperCamelCase ,__UpperCamelCase ): return o elif isinstance(__UpperCamelCase ,__UpperCamelCase ): return o.__name__ elif isinstance(__UpperCamelCase ,(list, tuple) ): return [to_json(__UpperCamelCase ) for x in o] elif isinstance(__UpperCamelCase ,__UpperCamelCase ): return {to_json(__UpperCamelCase ): to_json(__UpperCamelCase ) for k, v in o.items()} else: return o
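# Editorial example (not in the module): the path-to-module translation above
# turns an OS path under tests/models/ into a dotted import path. The model
# name "bert" is a made-up sample.
_demo_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
_components = _demo_file.split(os.path.sep)
_components[-1] = _components[-1].replace(".py", "")
assert ".".join(_components) == "tests.models.bert.test_modeling_bert"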
329
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Return the Liouville lambda of `number`: 1 when the count of prime
    factors (with multiplicity) is even, -1 when it is odd."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
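# Worked examples (editorial; they assume the repository's maths.prime_factors
# module is importable):
#
#     >>> liouville_lambda(10)  # 10 = 2 * 5, two prime factors -> even
#     1
#     >>> liouville_lambda(8)   # 8 = 2**3, three prime factors -> odd
#     -1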
329
1
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[Any] = KandinskyVaaControlnetPipeline _lowerCamelCase : List[str] = ['image_embeds', 'negative_image_embeds', 'hint'] _lowerCamelCase : Any = ['image_embeds', 'negative_image_embeds', 'hint'] _lowerCamelCase : Optional[Any] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _lowerCamelCase : List[str] = False @property def __A ( self : List[Any] ): return 32 @property def __A ( self : Tuple ): return 32 @property def __A ( self : List[str] ): return self.time_input_dim @property def __A ( self : Tuple ): return self.time_input_dim * 4 @property def __A ( self : str ): return 100 @property def __A ( self : Tuple ): torch.manual_seed(0 ) A_ = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } A_ = UNetaDConditionModel(**UpperCAmelCase ) return model @property def __A ( self : Union[str, Any] ): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __A ( self : Optional[Any] ): torch.manual_seed(0 ) A_ = VQModel(**self.dummy_movq_kwargs ) return model def __A ( self : str ): A_ = self.dummy_unet A_ = self.dummy_movq A_ = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=UpperCAmelCase , ) A_ = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any]=0 ): A_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase ) A_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( UpperCAmelCase ) # create hint A_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase ) if 
str(UpperCAmelCase ).startswith("mps" ): A_ = torch.manual_seed(UpperCAmelCase ) else: A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) A_ = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def __A ( self : Optional[int] ): A_ = "cpu" A_ = self.get_dummy_components() A_ = self.pipeline_class(**UpperCAmelCase ) A_ = pipe.to(UpperCAmelCase ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = pipe(**self.get_dummy_inputs(UpperCAmelCase ) ) A_ = output.images A_ = pipe( **self.get_dummy_inputs(UpperCAmelCase ) , return_dict=UpperCAmelCase , )[0] A_ = image[0, -3:, -3:, -1] A_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : str ): A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" ) A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) A_ = torch.from_numpy(np.array(UpperCAmelCase ) ).float() / 255.0 A_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) A_ = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCAmelCase ) A_ = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) A_ = pipeline.to(UpperCAmelCase ) pipeline.set_progress_bar_config(disable=UpperCAmelCase ) A_ = "A robot, 4k photo" A_ = torch.Generator(device="cuda" ).manual_seed(0 ) A_ , A_ = pipe_prior( UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() A_ = torch.Generator(device="cuda" ).manual_seed(0 ) A_ = pipeline( image_embeds=UpperCAmelCase , negative_image_embeds=UpperCAmelCase , hint=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=100 , output_type="np" , ) A_ = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
329
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
329
1
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
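# Editorial variant, not in the original script: scraping a hard-coded CSS
# class is brittle, so this hypothetical helper fails with a clear message
# instead of an AttributeError on None when Yahoo changes its markup.
def stock_price_checked(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    price_div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    if price_div is None:
        raise RuntimeError(f"Could not locate the price element for {symbol}")
    return price_div.find("span").text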
329
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
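# Editorial usage note: with the _LazyModule pattern above, a statement such
# as `from transformers.models.biogpt import BioGptConfig` only triggers the
# import of configuration_biogpt at that moment; importing the package itself
# stays cheap because no submodule is loaded up front.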
329
1
import itertools
import math


def is_prime(number: int) -> bool:
    """Trial-division primality check using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are prime
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
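# Cheap editorial self-check, not in the original solution: the primes run
# 2, 3, 5, 7, 11, 13, so the sixth prime is 13.
assert solution(6) == 13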
329
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Unwrap a model from distributed-training and torch.compile containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables inside a `with` block."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return it."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
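# Editorial usage sketch: values set through patch_environment exist only for
# the duration of the `with` block. The variable name is made up for the demo
# and assumed not to be set beforehand.
if __name__ == "__main__":
    with patch_environment(accelerate_demo_var="1"):
        assert os.environ["ACCELERATE_DEMO_VAR"] == "1"
    assert "ACCELERATE_DEMO_VAR" not in os.environ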
329
1
ERROR_MESSAGE = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish DNI: 8 digits followed by the checksum letter
    LOOKUP_LETTERS[number % 23]."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MESSAGE)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MESSAGE) from ex

    if letter.isdigit():
        raise ValueError(ERROR_MESSAGE)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
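# Worked example (editorial): 12345678 % 23 == 14 and LOOKUP_LETTERS[14] is
# "Z", so "12345678Z" is a valid DNI while any other checksum letter is not.
#
#     >>> is_spain_national_id("12345678Z")
#     True
#     >>> is_spain_national_id("12345678A")
#     False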
329
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : int ): A_ = tempfile.mkdtemp() A_ = BlipImageProcessor() A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer def __A ( self : Optional[Any] , **UpperCAmelCase : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor def __A ( self : Any ): shutil.rmtree(self.tmpdirname ) def __A ( self : Dict ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Any ): A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) A_ = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : Dict ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : int ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = processor(text=UpperCAmelCase ) A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : Any ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = 
processor.batch_decode(UpperCAmelCase ) A_ = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
329
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class _a ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple=7 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Union[str, Any]=30 , UpperCAmelCase : str=400 , UpperCAmelCase : Dict=True , UpperCAmelCase : Tuple=None , UpperCAmelCase : Dict=0.9 , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=[0.5, 0.5, 0.5] , UpperCAmelCase : Any=[0.5, 0.5, 0.5] , ): A_ = size if size is not None else {"shortest_edge": 30} A_ = crop_size if crop_size is not None else {"height": 30, "width": 30} A_ = parent A_ = batch_size A_ = num_channels A_ = min_resolution A_ = max_resolution A_ = do_resize_and_center_crop A_ = size A_ = crop_pct A_ = crop_size A_ = do_normalize A_ = image_mean A_ = image_std def __A ( self : Dict ): return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : str = PoolFormerImageProcessor if is_vision_available() else None def __A ( self : Any ): A_ = PoolFormerImageProcessingTester(self ) @property def __A ( self : Dict ): return self.image_processor_tester.prepare_image_processor_dict() def __A ( self : List[str] ): A_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase , "do_resize_and_center_crop" ) ) self.assertTrue(hasattr(UpperCAmelCase , "size" ) ) self.assertTrue(hasattr(UpperCAmelCase , "crop_pct" ) ) self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) ) self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) ) def __A ( self : Dict ): A_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 30} ) self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} ) A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def __A ( self : List[Any] ): pass def __A ( self : Tuple ): # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , Image.Image ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __A ( self : Any ): # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , np.ndarray ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __A ( self : str ): # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , torch.Tensor ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
329
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours when `num_picked` balls are drawn
    without replacement (Project Euler problem 493)."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
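# Editorial derivation: by linearity of expectation, a colour is present in
# the draw unless all 20 picks avoid its 10 balls, which happens with
# probability C(60, 20) / C(70, 20). Hence the expected number of colours is
# 7 * (1 - C(60, 20) / C(70, 20)) ~= 6.818741802.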
329
1
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : int ): A_ = tempfile.mkdtemp() A_ = BlipImageProcessor() A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer def __A ( self : Optional[Any] , **UpperCAmelCase : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor def __A ( self : Any ): shutil.rmtree(self.tmpdirname ) def __A ( self : Dict ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Any ): A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) A_ = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : Dict ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : int ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = processor(text=UpperCAmelCase ) A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : Any ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = 
processor.batch_decode(UpperCAmelCase ) A_ = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
329
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __a :Optional[Any] = logging.get_logger(__name__) __a :Any = {'vocab_file': 'vocab.txt'} __a :Any = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __a :List[str] = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __a :List[str] = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = VOCAB_FILES_NAMES _lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : Union[str, Any] = ConvBertTokenizer def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ): super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , ) A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars ): A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) ) A_ = do_lower_case A_ = strip_accents A_ = tokenize_chinese_chars A_ = normalizer_class(**UpperCAmelCase ) A_ = do_lower_case def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ): A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase )
329
1
import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup __a :Union[str, Any] = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582' } def __snake_case ( __UpperCamelCase : str = "dhaka" ,__UpperCamelCase : int = 5 ): """simple docstring""" A_ = min(__UpperCamelCase ,50 ) # Prevent abuse! A_ = { "q": query, "tbm": "isch", "hl": "en", "ijn": "0", } A_ = requests.get("https://www.google.com/search" ,params=__UpperCamelCase ,headers=__UpperCamelCase ) A_ = BeautifulSoup(html.text ,"html.parser" ) A_ = "".join( re.findall(R"AF_initDataCallback\(([^<]+)\);" ,str(soup.select("script" ) ) ) ) A_ = json.dumps(__UpperCamelCase ) A_ = json.loads(__UpperCamelCase ) A_ = re.findall( R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," ,__UpperCamelCase ,) if not matched_google_image_data: return 0 A_ = re.sub( R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" ,"" ,str(__UpperCamelCase ) ,) A_ = re.findall( R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" ,__UpperCamelCase ,) for index, fixed_full_res_image in enumerate(__UpperCamelCase ): if index >= max_images: return index A_ = bytes(__UpperCamelCase ,"ascii" ).decode( "unicode-escape" ) A_ = bytes(__UpperCamelCase ,"ascii" ).decode( "unicode-escape" ) A_ = urllib.request.build_opener() A_ = [ ( "User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582", ) ] urllib.request.install_opener(__UpperCamelCase ) A_ = f'''query_{query.replace(" " ,"_" )}''' if not os.path.exists(__UpperCamelCase ): os.makedirs(__UpperCamelCase ) urllib.request.urlretrieve( # noqa: S310 __UpperCamelCase ,f'''{path_name}/original_size_img_{index}.jpg''' ) return index if __name__ == "__main__": try: __a :Union[str, Any] = download_images_from_google_query(sys.argv[1]) print(F"{image_count} images were downloaded to disk.") except IndexError: print('Please provide a search term.') raise
329
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor


logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
329
1
import unittest

import numpy as np

from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class _a ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str]=7 , UpperCAmelCase : str=3 , UpperCAmelCase : List[Any]=18 , UpperCAmelCase : Optional[Any]=30 , UpperCAmelCase : str=400 , UpperCAmelCase : Any=True , UpperCAmelCase : int=None , UpperCAmelCase : Tuple=True , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Tuple=[0.5, 0.5, 0.5] , UpperCAmelCase : Dict=[0.5, 0.5, 0.5] , UpperCAmelCase : Optional[int]=False , ):
        A_ = size if size is not None else {"height": 20, "width": 20}
        A_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
        A_ = parent
        A_ = batch_size
        A_ = num_channels
        A_ = image_size
        A_ = min_resolution
        A_ = max_resolution
        A_ = do_resize
        A_ = size
        A_ = do_center_crop
        A_ = crop_size
        A_ = do_normalize
        A_ = image_mean
        A_ = image_std
        A_ = do_reduce_labels

    def __A ( self : Any ):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def __snake_case ( ):
    """simple docstring"""
    A_ = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test" )
    A_ = Image.open(dataset[0]["file"] )
    A_ = Image.open(dataset[1]["file"] )
    return image, map


def __snake_case ( ):
    """simple docstring"""
    A_ = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test" )
    A_ = Image.open(ds[0]["file"] )
    A_ = Image.open(ds[1]["file"] )
    A_ = Image.open(ds[2]["file"] )
    A_ = Image.open(ds[3]["file"] )
    return [imagea, imagea], [mapa, mapa]


@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
    """simple docstring"""

    _lowerCamelCase : Any = BeitImageProcessor if is_vision_available() else None

    def __A ( self : Union[str, Any] ):
        A_ = BeitImageProcessingTester(self )

    @property
    def __A ( self : Any ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def __A ( self : Any ):
        A_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "do_center_crop" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "center_crop" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) )

    def __A ( self : Any ):
        A_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase )

        A_ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=UpperCAmelCase )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
        self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase )

    def __A ( self : List[str] ):
        pass

    def __A ( self : int ):
        # Initialize image_processing
        A_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , Image.Image )

        # Test not batched input
        A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )

        # Test batched
        A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )

    def __A ( self : Optional[int] ):
        # Initialize image_processing
        A_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , np.ndarray )

        # Test not batched input
        A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )

        # Test batched
        A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )

    def __A ( self : Optional[int] ):
        # Initialize image_processing
        A_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , torch.Tensor )

        # Test not batched input
        A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )

        # Test batched
        A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )

    def __A ( self : Any ):
        # Initialize image_processing
        A_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
        A_ = []
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )

        # Test not batched input
        A_ = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape ,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )
        self.assertEqual(
            encoding["labels"].shape ,
            (1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )

        # Test batched
        A_ = image_processing(UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape ,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )
        self.assertEqual(
            encoding["labels"].shape ,
            (self.image_processor_tester.batch_size, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )

        # Test not batched input (PIL images)
        A_ , A_ = prepare_semantic_single_inputs()
        A_ = image_processing(UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape ,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )
        self.assertEqual(
            encoding["labels"].shape ,
            (1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )

        # Test batched input (PIL images)
        A_ , A_ = prepare_semantic_batch_inputs()
        A_ = image_processing(UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape ,
            (2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )
        self.assertEqual(
            encoding["labels"].shape ,
            (2, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )

    def __A ( self : int ):
        # Initialize image_processing
        A_ = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        A_ , A_ = prepare_semantic_single_inputs()
        A_ = image_processing(UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 150 )

        A_ = True
        A_ = image_processing(UpperCAmelCase , UpperCAmelCase , return_tensors="pt" )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )
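# A minimal usage sketch of the processor exercised above (not part of the test
# suite; the 256/224 sizes and the blank input image are illustrative assumptions):
#
#   from PIL import Image
#   from transformers import BeitImageProcessor
#
#   processor = BeitImageProcessor(size={"height": 256, "width": 256}, crop_size={"height": 224, "width": 224})
#   inputs = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#   print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])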
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class _a :
        """simple docstring"""

        @staticmethod
        def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ):
            pass


@is_pipeline_test
@require_vision
class _a ( unittest.TestCase ):
    """simple docstring"""

    @require_torch
    def __A ( self : List[str] ):
        A_ = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
        A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(UpperCAmelCase ) ,
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ] , )

        A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase ) ,
            [
                [{"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}],
                [{"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}],
                [{"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}],
                [{"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}],
                [{"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}],
            ] , )

    @require_tf
    def __A ( self : int ):
        A_ = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
        A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] )
        self.assertEqual(
            nested_simplify(UpperCAmelCase ) ,
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , )

        A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase ) ,
            [
                [{"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}],
                [{"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}],
                [{"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}],
                [{"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}],
                [{"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}],
            ] , )

    @slow
    @require_torch
    def __A ( self : Any ):
        A_ = pipeline(
            task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
        # This is an image of 2 cats with remotes and no planes
        A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
        self.assertEqual(
            nested_simplify(UpperCAmelCase ) ,
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ] , )

        A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase ) ,
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ] * 5 , )

    @slow
    @require_tf
    def __A ( self : Optional[Any] ):
        A_ = pipeline(
            task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
        # This is an image of 2 cats with remotes and no planes
        A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] )
        self.assertEqual(
            nested_simplify(UpperCAmelCase ) ,
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ] , )

        A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase ) ,
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ] * 5 , )
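# For reference, a hedged sketch of how the pipeline under test is used outside of
# unittest (model name taken from the slow test above; "cats.png" is a hypothetical
# local file, and the exact scores will differ):
#
#   from PIL import Image
#   from transformers import pipeline
#
#   classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   preds = classifier(Image.open("cats.png"), candidate_labels=["cat", "plane", "remote"])
#   # -> a list of {"score": float, "label": str} dicts, sorted by descending score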
import json
import os
import tempfile
import unittest

import numpy as np

from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class _a ( unittest.TestCase ):
    """simple docstring"""

    def __init__( self : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]=7 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Union[str, Any]=18 , UpperCAmelCase : List[str]=30 , UpperCAmelCase : Tuple=400 , UpperCAmelCase : Any=True , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=True , ):
        A_ = size if size is not None else {"height": 18, "width": 18}
        A_ = parent
        A_ = batch_size
        A_ = num_channels
        A_ = image_size
        A_ = min_resolution
        A_ = max_resolution
        A_ = do_resize
        A_ = size
        A_ = do_normalize

    def __A ( self : int ):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804],
                    [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296],
                ] ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class _a ( snake_case_ , unittest.TestCase ):
    """simple docstring"""

    _lowerCamelCase : Optional[Any] = ImageGPTImageProcessor if is_vision_available() else None

    def __A ( self : Union[str, Any] ):
        A_ = ImageGPTImageProcessingTester(self )

    @property
    def __A ( self : Optional[Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def __A ( self : Optional[int] ):
        A_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase , "clusters" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
        self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )

    def __A ( self : Dict ):
        A_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )

        A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def __A ( self : List[Any] ):
        A_ = self.image_processing_class(**self.image_processor_dict )
        A_ = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(UpperCAmelCase , obj[key] ) )
            else:
                self.assertEqual(obj[key] , UpperCAmelCase )

    def __A ( self : Dict ):
        A_ = self.image_processing_class(**self.image_processor_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            A_ = os.path.join(UpperCAmelCase , "image_processor.json" )
            image_processor_first.to_json_file(UpperCAmelCase )
            A_ = self.image_processing_class.from_json_file(UpperCAmelCase ).to_dict()

        A_ = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(UpperCAmelCase , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , UpperCAmelCase )

    def __A ( self : Any ):
        A_ = self.image_processing_class(**self.image_processor_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(UpperCAmelCase )
            A_ = self.image_processing_class.from_pretrained(UpperCAmelCase ).to_dict()

        A_ = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(UpperCAmelCase , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , UpperCAmelCase )

    @unittest.skip("ImageGPT requires clusters at initialization" )
    def __A ( self : str ):
        pass


def __snake_case ( ):
    """simple docstring"""
    A_ = load_dataset("hf-internal-testing/fixtures_image_utils" ,split="test" )
    A_ = Image.open(dataset[4]["file"] )
    A_ = Image.open(dataset[5]["file"] )
    A_ = [imagea, imagea]
    return images


@require_vision
@require_torch
class _a ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def __A ( self : Any ):
        A_ = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        A_ = prepare_images()

        # test non-batched
        A_ = image_processing(images[0] , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1024) )
        A_ = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCAmelCase )

        # test batched
        A_ = image_processing(UpperCAmelCase , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1024) )
        A_ = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCAmelCase )
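# The two serialization tests above reduce to this round trip (a sketch only;
# "tmp/" is a hypothetical directory):
#
#   from transformers import ImageGPTImageProcessor
#
#   processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#   processor.save_pretrained("tmp/")
#   reloaded = ImageGPTImageProcessor.from_pretrained("tmp/")
#   # "clusters" is a numpy array, so dict equality must go through np.array_equal,
#   # which is why the tests compare key by key instead of comparing whole dicts.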
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ):
    """simple docstring"""
    A_ = []
    for _ in range(__UpperCamelCase ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs


def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ):
    """simple docstring"""
    A_ = []
    for step in range(__UpperCamelCase ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                A_ = os.path.join(__UpperCamelCase ,"schedule.bin" )
                torch.save(scheduler.state_dict() ,__UpperCamelCase )

                A_ = torch.load(__UpperCamelCase )
                scheduler.load_state_dict(__UpperCamelCase )
    return lrs


@require_torch
class _a ( unittest.TestCase ):
    """simple docstring"""

    def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ):
        self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
        for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
            self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase )

    def __A ( self : List[Any] ):
        A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
        A_ = torch.tensor([0.4, 0.2, -0.5] )
        A_ = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            A_ = criterion(UpperCAmelCase , UpperCAmelCase )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )

    def __A ( self : Dict ):
        A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase )
        A_ = torch.tensor([0.4, 0.2, -0.5] )
        A_ = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A_ = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , )
        for _ in range(1000 ):
            A_ = criterion(UpperCAmelCase , UpperCAmelCase )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )


@require_torch
class _a ( unittest.TestCase ):
    """simple docstring"""

    _lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None
    _lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None
    _lowerCamelCase : Any = 1_0

    def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ):
        self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
        for a, b in zip(UpperCAmelCase , UpperCAmelCase ):
            self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase )

    def __A ( self : List[Any] ):
        A_ = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        A_ = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            A_ , A_ = data

            A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            A_ = unwrap_schedule(UpperCAmelCase , self.num_steps )
            self.assertListAlmostEqual(
                UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )

            A_ = scheduler_func(self.optimizer , **UpperCAmelCase )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase )  # wrap to test picklability of the schedule
            A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps )
            self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' )


class _a :
    """simple docstring"""

    def __init__( self : List[str] , UpperCAmelCase : List[str] ):
        A_ = fn

    def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ):
        return self.fn(*UpperCAmelCase , **UpperCAmelCase )

    @classmethod
    def __A ( self : Dict , UpperCAmelCase : List[str] ):
        A_ = list(map(self , scheduler.lr_lambdas ) )
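# A short sketch of the scheduler API these tests cover (hedged; the warmup and
# step counts mirror `common_kwargs` above, and AdamW here is the transformers one):
#
#   import torch
#   from transformers import AdamW, get_linear_schedule_with_warmup
#
#   model = torch.nn.Linear(50, 50)
#   optimizer = AdamW(model.parameters(), lr=10.0)
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   for _ in range(10):
#       optimizer.step()
#       scheduler.step()  # lr ramps 0 -> 10.0 over the 2 warmup steps, then decays linearly to 0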
import datasets


__a :Any = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'

__a :int = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'

__a :Optional[int] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'


def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ):
    """simple docstring"""
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
    """simple docstring"""

    def __A ( self : Optional[Any] ):
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
                } ) ,
            codebase_urls=[] ,
            reference_urls=[] ,
            format="numpy" , )

    def __A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int ):
        return {"accuracy": simple_accuracy(UpperCAmelCase , UpperCAmelCase )}
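# The metric is elementwise accuracy over label arrays; a minimal worked example
# with plain numpy (values chosen for illustration):
#
#   import numpy as np
#   preds, labels = np.array([0, 1, 2, 1]), np.array([0, 1, 1, 1])
#   assert (preds == labels).mean() == 0.75  # 3 of 4 predictions match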
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch

import multiprocess
import numpy as np
import pytest

from datasets.utils.py_utils import (
    NestedDataStructure,
    asdict,
    iflatmap_unordered,
    map_nested,
    temp_seed,
    temporary_assignment,
    zip_dict,
)

from .utils import require_tf, require_torch


def __snake_case ( __UpperCamelCase : Optional[int] ):  # picklable for multiprocessing
    """simple docstring"""
    return x.sum()


def __snake_case ( __UpperCamelCase : List[str] ):  # picklable for multiprocessing
    """simple docstring"""
    return i + 1


@dataclass
class _a :
    """simple docstring"""

    _lowerCamelCase : int
    _lowerCamelCase : str


class _a ( snake_case_ ):
    """simple docstring"""

    def __A ( self : Dict ):
        A_ = {}
        A_ = []
        A_ = 1
        A_ = [1, 2]
        A_ = {"a": 1, "b": 2}
        A_ = {"a": [1, 2], "b": [3, 4]}
        A_ = {"a": {"1": 1}, "b": 2}
        A_ = {"a": 1, "b": 2, "c": 3, "d": 4}
        A_ = {}
        A_ = []
        A_ = 2
        A_ = [2, 3]
        A_ = {"a": 2, "b": 3}
        A_ = {"a": [2, 3], "b": [4, 5]}
        A_ = {"a": {"1": 2}, "b": 3}
        A_ = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )

        A_ = 2
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )

        A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
        A_ = {"a": 2, "b": 0, "c": 2}
        A_ = {
            "a": np.eye(2 ).astype(UpperCAmelCase ),
            "b": np.zeros(3 ).astype(UpperCAmelCase ),
            "c": np.ones(2 ).astype(UpperCAmelCase ),
        }
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} ,
            {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} ,
            {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )

        with self.assertRaises(UpperCAmelCase ):  # can't pickle a local lambda
            map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase )

    def __A ( self : List[str] ):
        A_ = {"a": 1, "b": 2}
        A_ = {"a": 3, "b": 4}
        A_ = {"a": 5, "b": 6}
        A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase )

    def __A ( self : Any ):
        class _a :
            """simple docstring"""

            _lowerCamelCase : int = 'bar'

        A_ = Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )


@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" ,
    [(1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16)] ,
)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ):
    """simple docstring"""
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        A_ = {f'''{i}''': i for i in range(__UpperCamelCase )}
        A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc


class _a ( snake_case_ ):
    """simple docstring"""

    @require_tf
    def __A ( self : Union[str, Any] ):
        import tensorflow as tf
        from tensorflow.keras import layers

        A_ = layers.Dense(2 )

        def gen_random_output():
            A_ = tf.random.uniform((1, 3) )
            return model(UpperCAmelCase ).numpy()

        with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
            A_ = gen_random_output()
        with temp_seed(42 , set_tensorflow=UpperCAmelCase ):
            A_ = gen_random_output()
        A_ = gen_random_output()

        np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )

    @require_torch
    def __A ( self : Optional[int] ):
        import torch

        def gen_random_output():
            A_ = torch.nn.Linear(3 , 2 )
            A_ = torch.rand(1 , 3 )
            return model(UpperCAmelCase ).detach().numpy()

        with temp_seed(42 , set_pytorch=UpperCAmelCase ):
            A_ = gen_random_output()
        with temp_seed(42 , set_pytorch=UpperCAmelCase ):
            A_ = gen_random_output()
        A_ = gen_random_output()

        np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )

    def __A ( self : Any ):
        def gen_random_output():
            return np.random.rand(1 , 3 )

        with temp_seed(42 ):
            A_ = gen_random_output()
        with temp_seed(42 ):
            A_ = gen_random_output()
        A_ = gen_random_output()

        np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )


@pytest.mark.parametrize("input_data" ,[{}] )
def __snake_case ( __UpperCamelCase : str ):
    """simple docstring"""
    A_ = NestedDataStructure(__UpperCamelCase ).data
    assert output_data == input_data


@pytest.mark.parametrize(
    "data, expected_output" ,
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ] ,
)
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ):
    """simple docstring"""
    A_ = NestedDataStructure(__UpperCamelCase ).flatten()
    assert output == expected_output


def __snake_case ( ):
    """simple docstring"""
    A_ = A(x=1 ,y="foobar" )
    A_ = {"x": 1, "y": "foobar"}
    assert asdict(__UpperCamelCase ) == expected_output

    A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]}
    A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(__UpperCamelCase ) == expected_output

    with pytest.raises(__UpperCamelCase ):
        asdict([1, A(x=10 ,y="foo" )] )


def __snake_case ( __UpperCamelCase : str ):
    """simple docstring"""
    return text.split()


def __snake_case ( __UpperCamelCase : List[Any] ):
    """simple docstring"""
    yield (time.time(), content)
    time.sleep(2 )
    yield (time.time(), content)


def __snake_case ( ):
    """simple docstring"""
    with Pool(2 ) as pool:
        A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(__UpperCamelCase ) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) )
        assert out.count("hello" ) == 10
        assert out.count("there" ) == 10
        assert len(__UpperCamelCase ) == 20

    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        A_ = []
        for yield_time, content in iflatmap_unordered(
            __UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(__UpperCamelCase )
        assert out.count("a" ) == 2
        assert out.count("b" ) == 2
        assert len(__UpperCamelCase ) == 4
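# `map_nested` (exercised heavily above) applies a function to every leaf of a
# nested structure of dicts and lists; a sketch consistent with the first test:
#
#   from datasets.utils.py_utils import map_nested
#
#   assert map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"1": 3}}) == {"a": [2, 3], "b": {"1": 4}}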
import re
import tempfile
from pathlib import Path

import pytest
import yaml

from datasets.utils.readme import ReadMe


# @pytest.fixture
# def example_yaml_structure():

__a :Optional[Any] = yaml.safe_load(
    '\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n  - name: "Dataset Card for X"  # First-level markdown heading\n    allow_empty: false\n    allow_empty_text: true\n    subsections:\n      - name: "Table of Contents"\n        allow_empty: false\n        allow_empty_text: false\n        subsections: null\n      - name: "Dataset Description"\n        allow_empty: false\n        allow_empty_text: false\n        subsections:\n          - name: "Dataset Summary"\n            allow_empty: false\n            allow_empty_text: false\n            subsections: null\n          - name: "Supported Tasks and Leaderboards"\n            allow_empty: true\n            allow_empty_text: true\n            subsections: null\n          - name: Languages\n            allow_empty: false\n            allow_empty_text: true\n            subsections: null\n' )

__a :Dict = {
    'name': 'root',
    'text': '',
    'is_empty_text': True,
    'subsections': [
        {
            'name': 'Dataset Card for My Dataset',
            'text': '',
            'is_empty_text': True,
            'subsections': [
                {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
                {
                    'name': 'Dataset Description',
                    'text': 'Some text here.',
                    'is_empty_text': False,
                    'subsections': [
                        {'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
                        {'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': []},
                        {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
                    ],
                },
            ],
        }
    ],
}

__a :int = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

__a :int = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'

__a :Tuple = {
    'name': 'root',
    'text': '',
    'is_empty_text': True,
    'subsections': [
        {
            'name': 'Dataset Card for My Dataset',
            'text': '',
            'is_empty_text': True,
            'subsections': [
                {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
                {
                    'name': 'Dataset Description',
                    'text': 'Some text here.',
                    'is_empty_text': False,
                    'subsections': [
                        {
                            'name': 'Dataset Summary',
                            'text': 'Some text here.',
                            'is_empty_text': False,
                            'subsections': [
                                {'name': 'Extra Ignored Subsection', 'text': '', 'is_empty_text': True, 'subsections': []}
                            ],
                        },
                        {'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': []},
                        {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
                    ],
                },
            ],
        }
    ],
}

__a :Optional[Any] = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a :Optional[int] = (
    'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)

__a :Optional[Any] = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a :List[Any] = (
    'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)

__a :Any = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a :Union[str, Any] = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'

__a :Optional[int] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a :Optional[Any] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'

__a :Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
__a :Optional[Any] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'

__a :List[str] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
__a :int = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'

__a :Tuple = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
__a :Optional[int] = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'

__a :int = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a :Any = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'

__a :Any = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
__a :Any = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'

__a :List[Any] = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a :Optional[Any] = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'

__a :str = ''
__a :Optional[Any] = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'

__a :Dict = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__a :List[str] = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'


@pytest.mark.parametrize(
    "readme_md, expected_dict" ,
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] ,
)
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[str] ):
    """simple docstring"""
    assert ReadMe.from_string(__UpperCamelCase ,__UpperCamelCase ).to_dict() == expected_dict


@pytest.mark.parametrize(
    "readme_md, expected_error" ,
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] ,
)
def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Optional[Any] ):
    """simple docstring"""
    with pytest.raises(__UpperCamelCase ,match=re.escape(expected_error.format(path="root" ) ) ):
        A_ = ReadMe.from_string(__UpperCamelCase ,__UpperCamelCase )
        readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error" ,
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] ,
)
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Dict ):
    """simple docstring"""
    with pytest.raises(__UpperCamelCase ,match=re.escape(expected_error.format(path="root" ) ) ):
        ReadMe.from_string(__UpperCamelCase ,__UpperCamelCase )


@pytest.mark.parametrize(
    "readme_md," ,
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ] ,
)
def __snake_case ( __UpperCamelCase : Optional[int] ):
    """simple docstring"""
    ReadMe.from_string(__UpperCamelCase ,__UpperCamelCase ,suppress_parsing_errors=__UpperCamelCase )


@pytest.mark.parametrize(
    "readme_md, expected_dict" ,
    [
        (README_CORRECT, CORRECT_DICT),
        (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
    ] ,
)
def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        A_ = Path(__UpperCamelCase ) / "README.md"
        with open(__UpperCamelCase ,"w+" ) as readme_file:
            readme_file.write(__UpperCamelCase )
        A_ = ReadMe.from_readme(__UpperCamelCase ,__UpperCamelCase ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]


@pytest.mark.parametrize(
    "readme_md, expected_error" ,
    [
        (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
        (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
        (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
        (README_EMPTY, EXPECTED_ERROR_README_EMPTY),
        (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
        (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
        (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
        (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
        (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
        (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
        (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
    ] ,
)
def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : List[str] ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        A_ = Path(__UpperCamelCase ) / "README.md"
        with open(__UpperCamelCase ,"w+" ) as readme_file:
            readme_file.write(__UpperCamelCase )
        A_ = expected_error.format(path=__UpperCamelCase )
        with pytest.raises(__UpperCamelCase ,match=re.escape(__UpperCamelCase ) ):
            A_ = ReadMe.from_readme(__UpperCamelCase ,__UpperCamelCase )
            readme.validate()


@pytest.mark.parametrize(
    "readme_md, expected_error" ,
    [
        (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
    ] ,
)
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : Dict ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        A_ = Path(__UpperCamelCase ) / "README.md"
        with open(__UpperCamelCase ,"w+" ) as readme_file:
            readme_file.write(__UpperCamelCase )
        A_ = expected_error.format(path=__UpperCamelCase )
        with pytest.raises(__UpperCamelCase ,match=re.escape(__UpperCamelCase ) ):
            ReadMe.from_readme(__UpperCamelCase ,__UpperCamelCase )


@pytest.mark.parametrize(
    "readme_md," ,
    [
        (README_MULTIPLE_SAME_HEADING_1),
    ] ,
)
def __snake_case ( __UpperCamelCase : List[Any] ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        A_ = Path(__UpperCamelCase ) / "README.md"
        with open(__UpperCamelCase ,"w+" ) as readme_file:
            readme_file.write(__UpperCamelCase )
        ReadMe.from_readme(__UpperCamelCase ,__UpperCamelCase ,suppress_parsing_errors=__UpperCamelCase )
import argparse
import json
from typing import List

from ltp import LTP

from transformers import BertTokenizer


def __snake_case ( __UpperCamelCase : List[Any] ):
    """simple docstring"""
    if (
        (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
        or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F)  #
        or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F)  #
        or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F)  #
        or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F)  #
        or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F)  #
        or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
        or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F)  #
    ):  #
        return True

    return False


def __snake_case ( __UpperCamelCase : str ):
    """simple docstring"""
    for char in word:
        A_ = ord(__UpperCamelCase )
        if not _is_chinese_char(__UpperCamelCase ):
            return 0
    return 1


def __snake_case ( __UpperCamelCase : List[str] ):
    """simple docstring"""
    A_ = set()
    for token in tokens:
        A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase )
        if chinese_word:
            word_set.add(__UpperCamelCase )
    A_ = list(__UpperCamelCase )
    return word_list


def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] )

    A_ = bert_tokens
    A_ , A_ = 0, len(__UpperCamelCase )
    while start < end:
        A_ = True
        if is_chinese(bert_word[start] ):
            A_ = min(end - start ,__UpperCamelCase )
            for i in range(__UpperCamelCase ,1 ,-1 ):
                A_ = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 ,start + i ):
                        A_ = "##" + bert_word[j]
                    A_ = start + i
                    A_ = False
                    break
        if single_word:
            start += 1
    return bert_word


def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ):
    """simple docstring"""
    A_ = []
    for i in range(0 ,len(__UpperCamelCase ) ,100 ):
        A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        A_ = [get_chinese_word(__UpperCamelCase ) for r in res]
        ltp_res.extend(__UpperCamelCase )
    assert len(__UpperCamelCase ) == len(__UpperCamelCase )

    A_ = []
    for i in range(0 ,len(__UpperCamelCase ) ,100 ):
        A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 )
        bert_res.extend(res["input_ids"] )
    assert len(__UpperCamelCase ) == len(__UpperCamelCase )

    A_ = []
    for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ):
        A_ = []
        for id in input_ids:
            A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase )
            input_tokens.append(__UpperCamelCase )
        A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase )
        A_ = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(__UpperCamelCase ):
            if token[:2] == "##":
                A_ = token[2:]
                # save chinese tokens' pos
                if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ):
                    ref_id.append(__UpperCamelCase )
        ref_ids.append(__UpperCamelCase )

    assert len(__UpperCamelCase ) == len(__UpperCamelCase )
    return ref_ids


def __snake_case ( __UpperCamelCase : Dict ):
    """simple docstring"""
    with open(args.file_name ,"r" ,encoding="utf-8" ) as f:
        A_ = f.readlines()
    A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    A_ = LTP(args.ltp )  # faster in GPU device
    A_ = BertTokenizer.from_pretrained(args.bert )

    A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )

    with open(args.save_path ,"w" ,encoding="utf-8" ) as f:
        A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids]
        f.writelines(__UpperCamelCase )


if __name__ == "__main__":
    __a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref')
    parser.add_argument(
        '--file_name',
        type=str,
        default='./resources/chinese-demo.txt',
        help='file need process, same as training data in lm',
    )
    parser.add_argument('--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path')
    parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
    parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')

    __a :Dict = parser.parse_args()
    main(args)
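# Invocation sketch derived from the argparse defaults above (the script filename
# is hypothetical, and the default resource paths are illustrative, not guaranteed
# to exist on disk):
#
#   python prepare_chinese_ref.py \
#       --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp \
#       --bert ./resources/robert \
#       --save_path ./resources/ref.txt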
import os


try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"{len(upper_files)} files contain uppercase characters:")
    print('\n'.join(upper_files) + '\n')

__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
    print(F"{len(space_files)} files contain space characters:")
    print('\n'.join(space_files) + '\n')

__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(F"{len(hyphen_files)} files contain hyphen characters:")
    print('\n'.join(hyphen_files) + '\n')

__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"{len(nodir_files)} files are not in a directory:")
    print('\n'.join(nodir_files) + '\n')

__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def __snake_case ( __UpperCamelCase : Features ):
    """simple docstring"""
    A_ = np.inf

    def set_batch_size(__UpperCamelCase : FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(__UpperCamelCase ,__UpperCamelCase ):
            A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
            A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary":
            A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )

    _visit(__UpperCamelCase ,__UpperCamelCase )
    return None if batch_size is np.inf else batch_size


class _a ( snake_case_ ):
    """simple docstring"""

    def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ):
        super().__init__(
            UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , )
        A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths}
        A_ = _PACKAGED_DATASETS_MODULES["parquet"][1]
        A_ = Parquet(
            cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , )

    def __A ( self : Optional[Any] ):
        # Build iterable dataset
        if self.streaming:
            A_ = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            A_ = None
            A_ = None
            A_ = None
            A_ = None
            self.builder.download_and_prepare(
                download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , )
            A_ = self.builder.as_dataset(
                split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory )
        return dataset


class _a :
    """simple docstring"""

    def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ):
        A_ = dataset
        A_ = path_or_buf
        A_ = batch_size or get_writer_batch_size(dataset.features )
        A_ = parquet_writer_kwargs

    def __A ( self : int ):
        A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , "wb+" ) as buffer:
                A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
        else:
            A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs )
        return written

    def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ):
        A_ = 0
        A_ = parquet_writer_kwargs.pop("path_or_buf" , UpperCAmelCase )
        A_ = self.dataset.features.arrow_schema

        A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase )

        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
            A_ = query_table(
                table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(UpperCAmelCase )
            written += batch.nbytes
        writer.close()
        return written
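# The writer class above is what backs `Dataset.to_parquet`; a hedged end-to-end
# sketch with the public API ("out.parquet" is a hypothetical path):
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   ds.to_parquet("out.parquet")              # streams Arrow batches through a pq.ParquetWriter
#   ds2 = Dataset.from_parquet("out.parquet")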
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _a ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict=7 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : List[str]=18 , UpperCAmelCase : Optional[Any]=30 , UpperCAmelCase : Union[str, Any]=400 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=None , UpperCAmelCase : Tuple=True , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Tuple=True , ): A_ = size if size is not None else {"shortest_edge": 20} A_ = crop_size if crop_size is not None else {"height": 18, "width": 18} A_ = parent A_ = batch_size A_ = num_channels A_ = image_size A_ = min_resolution A_ = max_resolution A_ = do_resize A_ = size A_ = do_center_crop A_ = crop_size A_ = do_flip_channel_order def __A ( self : Dict ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None def __A ( self : str ): A_ = MobileViTImageProcessingTester(self ) @property def __A ( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def __A ( self : Dict ): A_ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) ) self.assertTrue(hasattr(UpperCAmelCase , "size" ) ) self.assertTrue(hasattr(UpperCAmelCase , "do_center_crop" ) ) self.assertTrue(hasattr(UpperCAmelCase , "center_crop" ) ) self.assertTrue(hasattr(UpperCAmelCase , "do_flip_channel_order" ) ) def __A ( self : str ): A_ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 20} ) self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} ) A_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def __A ( self : Any ): pass def __A ( self : Optional[Any] ): # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , Image.Image ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) 
, ) def __A ( self : Optional[int] ): # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , np.ndarray ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __A ( self : Tuple ): # Initialize image_processing A_ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase , torch.Tensor ) # Test not batched input A_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched A_ = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
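A minimal usage sketch for the image processor exercised by the tests above. The size and crop values mirror the tester defaults; the processor is constructed directly rather than from a Hub checkpoint, and torch must be installed for `return_tensors="pt"`.

from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
image = Image.new("RGB", (30, 40))  # any PIL image works
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 18, 18]) after resize + center crop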
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix filled with 1..row_size**2."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))  # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))  # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate 270 degrees counterclockwise."""
    return reverse_column(transpose(matrix))  # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    return [list(x) for x in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    return [x[::-1] for x in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
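A small worked example of the composition identities noted in the comments above, on a 2x2 matrix that is easy to check by hand:

# rotate_90 == reverse_row(transpose(m)): verify on a 2x2 matrix
m = [[1, 2], [3, 4]]
assert transpose(m) == [[1, 3], [2, 4]]
assert rotate_90(m) == [[2, 4], [1, 3]]    # 90 degrees counterclockwise
assert rotate_180(m) == [[4, 3], [2, 1]]
assert rotate_270(m) == [[3, 1], [4, 2]]   # 270 degrees counterclockwise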
from ...configuration_utils import PretrainedConfig from ...utils import logging __a :str = logging.get_logger(__name__) __a :Union[str, Any] = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'} class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Any = 'ctrl' _lowerCamelCase : Optional[int] = ['past_key_values'] _lowerCamelCase : str = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Dict , UpperCAmelCase : Tuple=246534 , UpperCAmelCase : Any=256 , UpperCAmelCase : List[Any]=1280 , UpperCAmelCase : List[str]=8192 , UpperCAmelCase : Optional[Any]=48 , UpperCAmelCase : Optional[int]=16 , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : Optional[int]=1E-6 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : Tuple=True , **UpperCAmelCase : Tuple , ): A_ = vocab_size A_ = n_positions A_ = n_embd A_ = n_layer A_ = n_head A_ = dff A_ = resid_pdrop A_ = embd_pdrop A_ = layer_norm_epsilon A_ = initializer_range A_ = use_cache super().__init__(**UpperCAmelCase )
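A short sketch of how the `attribute_map` above resolves canonical names, assuming the class is exported as `CTRLConfig` as in transformers; `PretrainedConfig` routes aliased attribute access through the map.

from transformers import CTRLConfig

config = CTRLConfig()
print(config.n_embd)             # 1280, the stored attribute
print(config.hidden_size)        # 1280, resolved through attribute_map
print(config.num_hidden_layers)  # 48, alias for n_layer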
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Any = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ): 
requires_backends(cls , ["torch", "transformers", "onnx"] )
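A minimal sketch of the dummy-object pattern used above: placeholder classes that fail fast with an install hint when optional backends are missing. The names here are illustrative stand-ins, not the library's real internals.

class RequiresBackends(type):
    """Illustrative stand-in for the DummyObject metaclass."""

    def __getattr__(cls, key):
        # Fires for any class attribute that does not exist, e.g. from_pretrained
        raise ImportError(f"{cls.__name__} requires the following backends: torch, transformers, onnx")


class OnnxPipelinePlaceholder(metaclass=RequiresBackends):
    pass


# OnnxPipelinePlaceholder.from_pretrained("...")  # -> ImportError with an install hint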
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __a :int = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ): """simple docstring""" A_ = b.T A_ = np.sum(np.square(__UpperCamelCase ) ,axis=1 ) A_ = np.sum(np.square(__UpperCamelCase ) ,axis=0 ) A_ = np.matmul(__UpperCamelCase ,__UpperCamelCase ) A_ = aa[:, None] - 2 * ab + ba[None, :] return d def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : str ): """simple docstring""" A_ = x.reshape(-1 ,3 ) A_ = squared_euclidean_distance(__UpperCamelCase ,__UpperCamelCase ) return np.argmin(__UpperCamelCase ,axis=1 ) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = ['pixel_values'] def __init__( self : Dict , UpperCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , **UpperCAmelCase : int , ): super().__init__(**UpperCAmelCase ) A_ = size if size is not None else {"height": 256, "width": 256} A_ = get_size_dict(UpperCAmelCase ) A_ = np.array(UpperCAmelCase ) if clusters is not None else None A_ = do_resize A_ = size A_ = resample A_ = do_normalize A_ = do_color_quantize def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[Any] , ): A_ = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size dictionary must contain both height and width keys. 
Got {size.keys()}''' ) return resize( UpperCAmelCase , size=(size["height"], size["width"]) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , ): A_ = rescale(image=UpperCAmelCase , scale=1 / 127.5 , data_format=UpperCAmelCase ) A_ = image - 1 return image def __A ( self : List[str] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **UpperCAmelCase : Optional[int] , ): A_ = do_resize if do_resize is not None else self.do_resize A_ = size if size is not None else self.size A_ = get_size_dict(UpperCAmelCase ) A_ = resample if resample is not None else self.resample A_ = do_normalize if do_normalize is not None else self.do_normalize A_ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize A_ = clusters if clusters is not None else self.clusters A_ = np.array(UpperCAmelCase ) A_ = make_list_of_images(UpperCAmelCase ) if not valid_images(UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True." ) # All transformations expect numpy arrays. A_ = [to_numpy_array(UpperCAmelCase ) for image in images] if do_resize: A_ = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images] if do_normalize: A_ = [self.normalize(image=UpperCAmelCase ) for image in images] if do_color_quantize: A_ = [to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) A_ = np.array(UpperCAmelCase ) A_ = color_quantize(UpperCAmelCase , UpperCAmelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) A_ = images.shape[0] A_ = images.reshape(UpperCAmelCase , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. A_ = list(UpperCAmelCase ) else: A_ = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] A_ = {"input_ids": images} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
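A self-contained numpy check of the nearest-cluster math used by `squared_euclidean_distance` and `color_quantize` above, with tiny made-up clusters and pixels:

import numpy as np

clusters = np.array([[0, 0, 0], [255, 255, 255]])   # two "palette" colors
pixels = np.array([[10, 10, 10], [200, 210, 190]])  # two RGB pixels

# d_ij = |a_i|^2 - 2 a_i.b_j + |b_j|^2, the same expansion as in the module
b = clusters.T
d = (
    np.sum(np.square(pixels), axis=1)[:, None]
    - 2 * np.matmul(pixels, b)
    + np.sum(np.square(b), axis=0)[None, :]
)
print(np.argmin(d, axis=1))  # [0 1]: dark pixel maps to cluster 0, light pixel to cluster 1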
import itertools
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes 2, 3, 5, 7, ... indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
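A quick sanity check: the first primes are 2, 3, 5, 7, 11, 13, so the 6th is 13, and the default argument reproduces the Project Euler 7 answer.

assert solution(1) == 2
assert solution(6) == 13
print(solution(10001))  # 104743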
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Dict=False ): """simple docstring""" try: A_ = os.environ[key] except KeyError: # KEY isn't set, default to `default`. A_ = default else: # KEY is set, convert it to True or False. try: A_ = strtobool(__UpperCamelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''' ) return _value __a :Any = parse_flag_from_env('RUN_SLOW', default=False) def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" return unittest.skip("Test was skipped" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" return unittest.skipUnless(_run_slow_tests ,"test is slow" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" return unittest.skipUnless(not torch.cuda.is_available() ,"test requires only a CPU" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" return unittest.skipUnless(torch.cuda.is_available() ,"test requires a GPU" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" return unittest.skipUnless(is_xpu_available() ,"test requires a XPU" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" return unittest.skipUnless(is_mps_available() ,"test requires a `mps` backend support in `torch`" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" return unittest.skipUnless( is_transformers_available() and is_datasets_available() ,"test requires the Hugging Face suite" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Tuple ): """simple docstring""" return unittest.skipUnless(is_bnb_available() ,"test requires the bitsandbytes library" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" return unittest.skipUnless(is_tpu_available() ,"test requires TPU" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : int ): """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() == 1 ,"test requires a GPU" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() == 1 ,"test requires a XPU" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() > 1 ,"test requires multiple GPUs" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : int ): """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() > 1 ,"test requires multiple XPUs" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" return unittest.skipUnless(is_safetensors_available() ,"test requires safetensors" )(__UpperCamelCase ) def 
__snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" return unittest.skipUnless(is_deepspeed_available() ,"test requires DeepSpeed" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" return unittest.skipUnless(is_torch_version(">=" ,"1.12.0" ) ,"test requires torch version >= 1.12.0" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[Any]=None ,__UpperCamelCase : Optional[Any]=None ): """simple docstring""" if test_case is None: return partial(__UpperCamelCase ,version=__UpperCamelCase ) return unittest.skipUnless(is_torch_version(">=" ,__UpperCamelCase ) ,f'''test requires torch version >= {version}''' )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" return unittest.skipUnless(is_tensorboard_available() ,"test requires Tensorboard" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" return unittest.skipUnless(is_wandb_available() ,"test requires wandb" )(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" return unittest.skipUnless(is_comet_ml_available() ,"test requires comet_ml" )(__UpperCamelCase ) __a :List[str] = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" return unittest.skipUnless( _atleast_one_tracker_available ,"test requires at least one tracker to be available and for `comet_ml` to not be installed" ,)(__UpperCamelCase ) class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : Union[str, Any] = True @classmethod def __A ( cls : Any ): A_ = tempfile.mkdtemp() @classmethod def __A ( cls : List[Any] ): if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def __A ( self : Dict ): if self.clear_on_setup: for path in Path(self.tmpdir ).glob("**/*" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(UpperCAmelCase ) class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Optional[int] ): super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Union[str, Any] , UpperCAmelCase : Union[mock.Mock, List[mock.Mock]] ): A_ = mocks if isinstance(UpperCAmelCase , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = AcceleratorState() A_ = tensor[None].clone().to(state.device ) A_ = gather(__UpperCamelCase ).cpu() A_ = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] ,__UpperCamelCase ): return False return True class _a : """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] ): A_ = returncode A_ = stdout A_ = stderr async def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Dict ): """simple docstring""" while True: A_ = await stream.readline() if line: callback(__UpperCamelCase ) else: break async def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : int=None ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : Tuple=None ,__UpperCamelCase : Tuple=False ,__UpperCamelCase : Any=False ): """simple docstring""" if echo: print("\nRunning: " ," ".join(__UpperCamelCase ) ) A_ = await asyncio.create_subprocess_exec( cmd[0] ,*cmd[1:] ,stdin=__UpperCamelCase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=__UpperCamelCase ,) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) A_ = [] A_ = [] def tee(__UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Dict ,__UpperCamelCase : List[Any]="" ): A_ = line.decode("utf-8" ).rstrip() sink.append(__UpperCamelCase ) if not quiet: print(__UpperCamelCase ,__UpperCamelCase ,file=__UpperCamelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout ,lambda __UpperCamelCase : tee(__UpperCamelCase ,__UpperCamelCase ,sys.stdout ,label="stdout:" ) ) ), asyncio.create_task(_read_stream(p.stderr ,lambda __UpperCamelCase : tee(__UpperCamelCase ,__UpperCamelCase ,sys.stderr ,label="stderr:" ) ) ), ] ,timeout=__UpperCamelCase ,) return _RunOutput(await p.wait() ,__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[Any]=None ,__UpperCamelCase : str=None ,__UpperCamelCase : Optional[Any]=180 ,__UpperCamelCase : str=False ,__UpperCamelCase : str=True ): """simple docstring""" A_ = asyncio.get_event_loop() A_ = loop.run_until_complete( _stream_subprocess(__UpperCamelCase ,env=__UpperCamelCase ,stdin=__UpperCamelCase ,timeout=__UpperCamelCase ,quiet=__UpperCamelCase ,echo=__UpperCamelCase ) ) A_ = " ".join(__UpperCamelCase ) if result.returncode > 0: A_ = "\n".join(result.stderr ) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''' ) return result class _a ( snake_case_ ): """simple docstring""" pass def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Optional[int]=False ): """simple docstring""" try: A_ = subprocess.check_output(__UpperCamelCase ,stderr=subprocess.STDOUT ) if return_stdout: if hasattr(__UpperCamelCase ,"decode" ): A_ = output.decode("utf-8" ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'''Command `{" ".join(__UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
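A small sketch of the env-flag parsing the skip decorators above rely on; `MY_FLAG` is a hypothetical variable name used only for illustration.

import os
from distutils.util import strtobool

os.environ["MY_FLAG"] = "yes"
value = bool(strtobool(os.environ.get("MY_FLAG", "no")))
print(value)  # True; "1", "true", "on" parse the same way, while "no"/"0"/"false" give False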
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _a : """simple docstring""" def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ): A_ = parent A_ = 13 A_ = 7 A_ = True A_ = True A_ = True A_ = True A_ = 99 A_ = 384 A_ = 2 A_ = 4 A_ = 37 A_ = "gelu" A_ = 0.1 A_ = 0.1 A_ = 512 A_ = 16 A_ = 2 A_ = 0.02 A_ = 3 A_ = 4 A_ = 128 A_ = 2 A_ = 9 A_ = 1 A_ = None def __A ( self : Optional[int] ): A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_input_mask: A_ = random_attention_mask([self.batch_size, self.seq_length] ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = ids_tensor([self.batch_size] , self.num_choices ) A_ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ): A_ = TFConvBertModel(config=UpperCAmelCase ) A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ = [input_ids, input_mask] A_ = model(UpperCAmelCase ) A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : 
Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ): A_ = TFConvBertForMaskedLM(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ): A_ = self.num_labels A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ): A_ = self.num_choices A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ): A_ = self.num_labels A_ = TFConvBertForTokenClassification(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ): A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : List[str] ): A_ = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = config_and_inputs A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) _lowerCamelCase : Any = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': 
TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase : Dict = False _lowerCamelCase : Optional[int] = False _lowerCamelCase : Dict = False def __A ( self : List[str] ): A_ = TFConvBertModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def __A ( self : Tuple ): self.config_tester.run_common_tests() def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase ) def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase ) @slow def __A ( self : str ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True A_ = True if hasattr(UpperCAmelCase , "use_cache" ): A_ = True A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) for model_class in self.all_model_classes: A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) A_ = model_class(UpperCAmelCase ) A_ = len(model(UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase ) A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" ) A_ = tf.keras.models.load_model(UpperCAmelCase ) A_ = model(UpperCAmelCase ) if self.is_encoder_decoder: A_ = outputs["encoder_hidden_states"] A_ = outputs["encoder_attentions"] else: A_ = outputs["hidden_states"] A_ = outputs["attentions"] self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) A_ = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __A ( self : List[str] ): A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(UpperCAmelCase ) def __A ( self : Any ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ): A_ = len(UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) A_ = 
outputs.decoder_attentions self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ): A_ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: A_ = True A_ = False A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) A_ = len(UpperCAmelCase ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) if self.is_encoder_decoder: A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_decoder_attentions_output(UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) # Check attention is always last and order is fine A_ = True A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) @require_tf class _a ( unittest.TestCase ): """simple docstring""" @slow def __A ( self : Dict ): A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) A_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ = model(UpperCAmelCase )[0] A_ = [1, 6, 768] self.assertEqual(output.shape , UpperCAmelCase ) A_ = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
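A minimal inference sketch mirroring the integration test above; it requires TensorFlow and uses the same checkpoint the tests pin down.

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
hidden_states = model(input_ids)[0]
print(hidden_states.shape)  # (1, 6, 768)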
import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __a :List[str] = '\\n Text data.\n Second line of data.' __a :str = 'file' @pytest.fixture(scope="session" ) def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") A_ = bytes(__UpperCamelCase ,"utf-8" ) with zstd.open(__UpperCamelCase ,"wb" ) as f: f.write(__UpperCamelCase ) return path @pytest.fixture def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" with open(os.path.join(tmpfs.local_root_dir ,__UpperCamelCase ) ,"w" ) as f: f.write(__UpperCamelCase ) return FILE_PATH @pytest.mark.parametrize("compression_format" ,["gzip", "xz", "zstd"] ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} A_ = input_paths[compression_format] A_ = tmp_path / "cache" A_ = DownloadConfig(cache_dir=__UpperCamelCase ,extract_compressed_file=__UpperCamelCase ) A_ = cached_path(__UpperCamelCase ,download_config=__UpperCamelCase ) with open(__UpperCamelCase ) as f: A_ = f.read() with open(__UpperCamelCase ) as f: A_ = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted" ,[True, False] ) @pytest.mark.parametrize("default_cache_dir" ,[True, False] ) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[Any] ,__UpperCamelCase : Tuple ): """simple docstring""" A_ = "custom_cache" A_ = "custom_extracted_dir" A_ = tmp_path / "custom_extracted_path" if default_extracted: A_ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" ,__UpperCamelCase ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" ,str(__UpperCamelCase ) ) A_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) A_ = xz_file A_ = ( DownloadConfig(extract_compressed_file=__UpperCamelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=__UpperCamelCase ) ) A_ = cached_path(__UpperCamelCase ,download_config=__UpperCamelCase ) assert Path(__UpperCamelCase ).parent.parts[-2:] == expected def __snake_case ( __UpperCamelCase : Optional[int] ): """simple docstring""" A_ = str(Path(__UpperCamelCase ).resolve() ) assert cached_path(__UpperCamelCase ) == text_file # relative path A_ = str(Path(__UpperCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(__UpperCamelCase ) == text_file def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(__UpperCamelCase ): cached_path(__UpperCamelCase ) # relative path A_ = "./__missing_file__.txt" with pytest.raises(__UpperCamelCase ): cached_path(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ = get_from_cache(f'''tmp://{tmpfs_file}''' ) with open(__UpperCamelCase ) as f: A_ = f.read() 
assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE" ,__UpperCamelCase ) def __snake_case ( ): """simple docstring""" with pytest.raises(__UpperCamelCase ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(__UpperCamelCase ): http_get("https://huggingface.co" ,temp_file=__UpperCamelCase ) with pytest.raises(__UpperCamelCase ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(__UpperCamelCase ): ftp_get("ftp://huggingface.co" ,temp_file=__UpperCamelCase ) with pytest.raises(__UpperCamelCase ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(__UpperCamelCase ): fsspec_get("s3://huggingface.co" ,temp_file=__UpperCamelCase ) with pytest.raises(__UpperCamelCase ): fsspec_head("s3://huggingface.co" )
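A sketch of the happy path the fixtures above exercise: local paths pass through `cached_path`, and compressed files can be auto-extracted into the cache. The cache directory and archive path here are hypothetical.

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import cached_path

config = DownloadConfig(cache_dir="./hf_cache", extract_compressed_file=True)  # hypothetical dir
path = cached_path("data/archive.txt.gz", download_config=config)  # hypothetical local archive
print(path)  # points at the extracted file inside the cache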
from ...configuration_utils import PretrainedConfig from ...utils import logging __a :Dict = logging.get_logger(__name__) __a :int = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = 'realm' def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ): super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase ) # Common config A_ = vocab_size A_ = max_position_embeddings A_ = hidden_size A_ = retriever_proj_size A_ = num_hidden_layers A_ = num_attention_heads A_ = num_candidates A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = initializer_range A_ = type_vocab_size A_ = layer_norm_eps # Reader config A_ = span_hidden_size A_ = max_span_width A_ = reader_layer_norm_eps A_ = reader_beam_size A_ = reader_seq_len # Retrieval config A_ = num_block_records A_ = searcher_beam_size
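An instantiation sketch, assuming the class above is exported as `RealmConfig` as in transformers; overrides flow through to the retriever and reader attributes.

from transformers import RealmConfig

config = RealmConfig(num_candidates=4, reader_beam_size=2)
print(config.vocab_size, config.num_candidates, config.reader_beam_size)  # 30522 4 2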
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ): """simple docstring""" A_ = [] for _ in range(__UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ): """simple docstring""" A_ = [] for step in range(__UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A_ = os.path.join(__UpperCamelCase ,"schedule.bin" ) torch.save(scheduler.state_dict() ,__UpperCamelCase ) A_ = torch.load(__UpperCamelCase ) scheduler.load_state_dict(__UpperCamelCase ) return lrs @require_torch class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ): self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ): self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase ) A_ = torch.tensor([0.4, 0.2, -0.5] ) A_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): A_ = criterion(UpperCAmelCase , UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def __A ( self : Dict ): A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase ) A_ = torch.tensor([0.4, 0.2, -0.5] ) A_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A_ = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , ) for _ in range(1000 ): A_ = criterion(UpperCAmelCase , UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None _lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _lowerCamelCase : Any = 1_0 def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ): self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ): self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A_ = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A_ , A_ = data A_ = scheduler_func(self.optimizer , **UpperCAmelCase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A_ = unwrap_schedule(UpperCAmelCase , self.num_steps ) self.assertListAlmostEqual( UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) A_ = scheduler_func(self.optimizer , **UpperCAmelCase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' ) class _a : """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : List[str] ): A_ = fn def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ): return self.fn(*UpperCAmelCase , **UpperCAmelCase ) @classmethod def __A ( self : Dict , UpperCAmelCase : List[str] ): A_ = list(map(self , scheduler.lr_lambdas ) )
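A standalone sketch of the warmup-then-linear-decay schedule those tests pin down, using the same hyperparameters as `common_kwargs`:

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))
optimizer = AdamW([param], lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])
    optimizer.step()
    scheduler.step()
print(lrs)  # ramps 0.0 -> 10.0 over 2 warmup steps, then decays linearly toward 0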
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a :Optional[Any] = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = original_name.split("." )[0] A_ = key.split("." ) A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] ) A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] ) A_ = orig_block_num - offset A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" A_ = OrderedDict() A_ , A_ = 0, 0 for key, value in state_dict.items(): if key.startswith("network" ): A_ = key.replace("network" ,"poolformer.encoder" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("bias" ) and "patch_embed" not in key: patch_emb_offset += 1 A_ = key[: key.find("proj" )] A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' ) A_ = key.replace("proj" ,"projection" ) if key.endswith("bias" ): total_embed_found += 1 if "patch_embeddings" in key: A_ = "poolformer.encoder." + key if "mlp.fc1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" ) if "mlp.fc2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" ) if "norm1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" ) if "norm2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" ) if "layer_scale_1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" ) if "layer_scale_2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" ) if "head" in key: A_ = key.replace("head" ,"classifier" ) A_ = value return new_state_dict def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return image @torch.no_grad() def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ): """simple docstring""" A_ = PoolFormerConfig() # set attributes based on model_name A_ = "huggingface/label-files" A_ = model_name[-3:] A_ = 1000 A_ = "imagenet-1k-id2label.json" A_ = (1, 1000) # set config attributes A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} if size == "s12": A_ = [2, 2, 6, 2] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 0.9 elif size == "s24": A_ = [4, 4, 12, 4] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 0.9 elif size == "s36": A_ = [6, 6, 18, 6] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 1E-6 A_ = 0.9 elif size == "m36": A_ = [6, 6, 18, 6] A_ = [96, 192, 384, 768] A_ = 4.0 A_ = 1E-6 A_ = 0.95 elif size == "m48": A_ = [8, 8, 24, 8] A_ = [96, 192, 384, 768] A_ = 4.0 A_ = 1E-6 A_ = 
0.95 else: raise ValueError(f'''Size {size} not supported''' ) # load image processor A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase ) # Prepare image A_ = prepare_img() A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values logger.info(f'''Converting model {model_name}...''' ) # load original state dict A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) ) # rename keys A_ = rename_keys(__UpperCamelCase ) # create HuggingFace model and load state dict A_ = PoolFormerForImageClassification(__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) model.eval() # Define image processor A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase ) A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values # forward pass A_ = model(__UpperCamelCase ) A_ = outputs.logits # define expected logit slices for different models if size == "s12": A_ = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": A_ = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": A_ = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": A_ = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": A_ = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(f'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 ) # finally, save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __a :int = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
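The script above is driven from the command line; a typical invocation looks like the following (the script filename, checkpoint path, and output directory are placeholders).

# python convert_poolformer_checkpoint.py \
#     --model_name poolformer_s12 \
#     --checkpoint_path ./poolformer_s12.pth \
#     --pytorch_dump_folder_path ./poolformer-s12-hf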
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main():
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name, key_size):
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
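The keys written above drive textbook RSA; a tiny worked roundtrip with classic demo values (far too small to be secure) shows the underlying modular arithmetic:

# Classic toy example: p=61, q=53 -> n=3233, phi=3120, e=17, d=2753
n, e, d = 3233, 17, 2753
message = 65
ciphertext = pow(message, e, n)          # 2790
assert pow(ciphertext, d, n) == message  # decryption recovers the message
print(ciphertext)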
329
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : torch.FloatTensor _lowerCamelCase : Optional[torch.FloatTensor] = None def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCamelCase : Any ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCamelCase : int ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) A_ = [] for i in range(__UpperCamelCase ): A_ = i / num_diffusion_timesteps A_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) ) return torch.tensor(__UpperCamelCase ,dtype=torch.floataa ) class _a ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) A_ = betas_for_alpha_bar(UpperCAmelCase ) A_ = 1.0 - self.betas A_ = torch.cumprod(self.alphas , dim=0 ) A_ = torch.tensor(1.0 ) # standard deviation of the initial noise distribution A_ = 1.0 # setable values A_ = None A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() ) A_ = variance_type def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ): return sample def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ): A_ = num_inference_steps A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ): if prev_timestep is None: A_ = t - 1 A_ = self.alphas_cumprod[t] A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one A_ = 1 - alpha_prod_t A_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: A_ = self.betas[t] else: A_ = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample A_ = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: A_ = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) ) A_ = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler A_ = 
variance.log() A_ = beta.log() A_ = (predicted_variance + 1) / 2 A_ = frac * max_log + (1 - frac) * min_log return variance def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ): A_ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 ) else: A_ = None # 1. compute alphas, betas if prev_timestep is None: A_ = t - 1 A_ = self.alphas_cumprod[t] A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one A_ = 1 - alpha_prod_t A_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: A_ = self.betas[t] A_ = self.alphas[t] else: A_ = 1 - alpha_prod_t / alpha_prod_t_prev A_ = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": A_ = model_output else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: A_ = torch.clamp( UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise A_ = 0 if t > 0: A_ = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device ) A_ = self._get_variance( UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , ) if self.variance_type == "fixed_small_log": A_ = variance elif self.variance_type == "learned_range": A_ = (0.5 * variance).exp() else: raise ValueError( f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' " for the UnCLIPScheduler." ) A_ = variance * variance_noise A_ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) A_ = timesteps.to(original_samples.device ) A_ = alphas_cumprod[timesteps] ** 0.5 A_ = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): A_ = sqrt_alpha_prod.unsqueeze(-1 ) A_ = (1 - alphas_cumprod[timesteps]) ** 0.5 A_ = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
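# Hedged sketch of a short denoising loop against a scheduler like the one
# above; the class and method names follow the diffusers convention this file
# mirrors, and the random tensors stand in for a real UNet prediction.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 64, 64)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for the model call
    sample = scheduler.step(model_output, t, sample).prev_sample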
329
1
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Union[str, Any] ): A_ = tempfile.mkdtemp() A_ = SamImageProcessor() A_ = SamProcessor(UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def __A ( self : Union[str, Any] , **UpperCAmelCase : Any ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor def __A ( self : Tuple ): shutil.rmtree(self.tmpdirname ) def __A ( self : int ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Tuple ): A_ = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) A_ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : Dict ): A_ = self.get_image_processor() A_ = SamProcessor(image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def __A ( self : Optional[Any] ): A_ = self.get_image_processor() A_ = SamProcessor(image_processor=UpperCAmelCase ) A_ = [torch.ones((1, 3, 5, 5) )] A_ = [[1764, 2646]] A_ = [[683, 1024]] A_ = processor.post_process_masks(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) A_ = processor.post_process_masks( UpperCAmelCase , torch.tensor(UpperCAmelCase ) , torch.tensor(UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np A_ = [np.ones((1, 3, 5, 5) )] A_ = processor.post_process_masks(UpperCAmelCase , np.array(UpperCAmelCase ) , np.array(UpperCAmelCase ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) A_ = [[1, 0], [0, 1]] with self.assertRaises(UpperCAmelCase ): A_ = processor.post_process_masks(UpperCAmelCase , np.array(UpperCAmelCase ) , np.array(UpperCAmelCase ) ) @require_vision @require_tf class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Optional[int] ): A_ = tempfile.mkdtemp() A_ = SamImageProcessor() A_ = SamProcessor(UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def __A ( self : Optional[Any] , **UpperCAmelCase : Optional[Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor def __A ( self 
: str ): shutil.rmtree(self.tmpdirname ) def __A ( self : Optional[Any] ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : str ): A_ = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) A_ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : Optional[int] ): A_ = self.get_image_processor() A_ = SamProcessor(image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = SamProcessor(image_processor=UpperCAmelCase ) A_ = [tf.ones((1, 3, 5, 5) )] A_ = [[1764, 2646]] A_ = [[683, 1024]] A_ = processor.post_process_masks(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , return_tensors="tf" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) A_ = processor.post_process_masks( UpperCAmelCase , tf.convert_to_tensor(UpperCAmelCase ) , tf.convert_to_tensor(UpperCAmelCase ) , return_tensors="tf" , ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np A_ = [np.ones((1, 3, 5, 5) )] A_ = processor.post_process_masks( UpperCAmelCase , np.array(UpperCAmelCase ) , np.array(UpperCAmelCase ) , return_tensors="tf" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) A_ = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): A_ = processor.post_process_masks( UpperCAmelCase , np.array(UpperCAmelCase ) , np.array(UpperCAmelCase ) , return_tensors="tf" ) @require_vision @require_torchvision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Optional[Any] ): A_ = tempfile.mkdtemp() A_ = SamImageProcessor() A_ = SamProcessor(UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def __A ( self : int , **UpperCAmelCase : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor def __A ( self : Tuple ): shutil.rmtree(self.tmpdirname ) def __A ( self : Union[str, Any] ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def __A ( self : str ): A_ = self.get_image_processor() A_ = SamProcessor(image_processor=UpperCAmelCase ) A_ = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) A_ = [tf.convert_to_tensor(UpperCAmelCase )] A_ = [torch.tensor(UpperCAmelCase )] A_ = [[1764, 2646]] A_ = [[683, 1024]] A_ = processor.post_process_masks( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , return_tensors="tf" ) A_ = processor.post_process_masks( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , return_tensors="pt" 
) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = SamProcessor(image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="pt" )["pixel_values"].numpy() A_ = processor(images=UpperCAmelCase , return_tensors="pt" )["pixel_values"].numpy() A_ = image_processor(UpperCAmelCase , return_tensors="tf" )["pixel_values"].numpy() A_ = processor(images=UpperCAmelCase , return_tensors="tf" )["pixel_values"].numpy() self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase ) ) self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase ) ) self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase ) )
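# Hedged sketch of the post-processing contract the tests above exercise:
# low-resolution masks plus the recorded original/reshaped sizes come back
# upscaled to the original image resolution. The hub id is an assumption.
import numpy as np
from transformers import SamProcessor

sam_processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
low_res_masks = [np.ones((1, 3, 5, 5))]
masks = sam_processor.post_process_masks(
    low_res_masks, np.array([[1764, 2646]]), np.array([[683, 1024]])
)
print(masks[0].shape)  # expected: (1, 3, 1764, 2646)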
329
from math import isqrt, loga def __snake_case ( __UpperCamelCase : int ): """simple docstring""" A_ = [True] * max_number for i in range(2 ,isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 ,__UpperCamelCase ,__UpperCamelCase ): A_ = False return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]] def __snake_case ( __UpperCamelCase : int = 80_0800 ,__UpperCamelCase : int = 80_0800 ): """simple docstring""" A_ = degree * loga(__UpperCamelCase ) A_ = int(__UpperCamelCase ) A_ = calculate_prime_numbers(__UpperCamelCase ) A_ = 0 A_ = 0 A_ = len(__UpperCamelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"{solution() = }")
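# Hedged note on the bound used above: a hybrid integer p**q * q**p stays below
# base**degree exactly when q*log2(p) + p*log2(q) <= degree*log2(base), which is
# the inequality the two-pointer sweep over the sorted prime list checks.
from math import log2

lhs = 3 * log2(2) + 2 * log2(3)  # p = 2, q = 3
rhs = 800 * log2(800)            # degree = base = 800 as a small stand-in
assert (2**3 * 3**2 <= 800**800) == (lhs <= rhs)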
329
1
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def __snake_case ( __UpperCamelCase : str ): """simple docstring""" for char in word: A_ = ord(__UpperCamelCase ) if not _is_chinese_char(__UpperCamelCase ): return 0 return 1 def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ = set() for token in tokens: A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase ) if chinese_word: word_set.add(__UpperCamelCase ) A_ = list(__UpperCamelCase ) return word_list def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] ) A_ = bert_tokens A_ , A_ = 0, len(__UpperCamelCase ) while start < end: A_ = True if is_chinese(bert_word[start] ): A_ = min(end - start ,__UpperCamelCase ) for i in range(__UpperCamelCase ,1 ,-1 ): A_ = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 ,start + i ): A_ = "##" + bert_word[j] A_ = start + i A_ = False break if single_word: start += 1 return bert_word def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ): """simple docstring""" A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0] A_ = [get_chinese_word(__UpperCamelCase ) for r in res] ltp_res.extend(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ): A_ = [] for id in input_ids: A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase ) input_tokens.append(__UpperCamelCase ) A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase ) A_ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__UpperCamelCase ): if token[:2] == "##": A_ = token[2:] # save chinese tokens' pos if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ): ref_id.append(__UpperCamelCase ) ref_ids.append(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) return ref_ids def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" with open(args.file_name ,"r" ,encoding="utf-8" ) as f: A_ = f.readlines() A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ = LTP(args.ltp ) # faster on a GPU device A_ = BertTokenizer.from_pretrained(args.bert ) A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) with open(args.save_path ,"w" ,encoding="utf-8" ) as f: A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids] f.writelines(__UpperCamelCase ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', type=str, default='./resources/chinese-demo.txt', help='file to process, same format as the LM training data', ) parser.add_argument( '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path' ) parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer') parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save the result') __a :Dict = parser.parse_args() main(args)
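# Hedged illustration of the "##" convention the script above emits: for each
# whole word found by LTP, every non-initial BERT sub-token is rewritten with a
# "##" prefix so whole-word masking can treat the pieces as one unit.
bert_tokens = ["中", "国", "人"]   # BERT splits Chinese text per character
chinese_words = {"中国"}           # LTP found one two-character word
marked = ["中", "##国", "人"]      # expected output of the sub-symbol pass
ref_positions = [i for i, t in enumerate(marked) if t.startswith("##")]
assert ref_positions == [1]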
329
import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() __a :str = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ): """simple docstring""" A_ = RobertaPreLayerNormConfig.from_pretrained( __UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] ) # convert state_dict A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) ) A_ = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith("roberta." ): A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ): continue A_ = tensor_value A_ = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) # convert tokenizer A_ = AutoTokenizer.from_pretrained(__UpperCamelCase ) tokenizer.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint-repo', default=None, type=str, required=True, help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __a :Any = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
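# Hedged sketch: reloading the converted checkpoint from the dump folder (a
# placeholder path) with the matching transformers classes.
from transformers import AutoTokenizer, RobertaPreLayerNormForMaskedLM

reloaded_model = RobertaPreLayerNormForMaskedLM.from_pretrained("./roberta_prelayernorm_hf")
reloaded_tokenizer = AutoTokenizer.from_pretrained("./roberta_prelayernorm_hf")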
329
1
from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
329
from maths.prime_factors import prime_factors def __snake_case ( __UpperCamelCase : int ): """simple docstring""" if not isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = f'''Input value of [number={number}] must be an integer''' raise TypeError(__UpperCamelCase ) if number < 1: raise ValueError("Input must be a positive integer" ) return -1 if len(prime_factors(__UpperCamelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
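# Hedged check of the sign function above (the Liouville lambda; its name is
# masked as __snake_case, `liouville` is assumed here): 12 = 2*2*3 has an odd
# number of prime factors counted with multiplicity, while 10 = 2*5 has an
# even number.
assert liouville(12) == -1  # noqa: F821 - illustrative name
assert liouville(10) == 1   # noqa: F821 - illustrative name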
329
1
from collections import UserDict from typing import Union import numpy as np import requests from ..utils import ( add_end_docstrings, logging, ) from .audio_classification import ffmpeg_read from .base import PIPELINE_INIT_ARGS, Pipeline __a :Optional[int] = logging.get_logger(__name__) @add_end_docstrings(snake_case_ ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[int] , **UpperCAmelCase : Tuple ): super().__init__(**UpperCAmelCase ) if self.framework != "pt": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) # No specific FOR_XXX available yet def __call__( self : List[str] , UpperCAmelCase : Union[np.ndarray, bytes, str] , **UpperCAmelCase : Optional[Any] ): return super().__call__(UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Tuple , **UpperCAmelCase : List[str] ): A_ = {} if "candidate_labels" in kwargs: A_ = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: A_ = kwargs["hypothesis_template"] return preprocess_params, {}, {} def __A ( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : int=None , UpperCAmelCase : Optional[int]="This is a sound of {}." ): if isinstance(UpperCAmelCase , UpperCAmelCase ): if audio.startswith("http://" ) or audio.startswith("https://" ): # We need to actually check for a real protocol, otherwise it's impossible to use a local file # like http_huggingface_co.png A_ = requests.get(UpperCAmelCase ).content else: with open(UpperCAmelCase , "rb" ) as f: A_ = f.read() if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = ffmpeg_read(UpperCAmelCase , self.feature_extractor.sampling_rate ) if not isinstance(UpperCAmelCase , np.ndarray ): raise ValueError("We expect a numpy ndarray as input" ) if len(audio.shape ) != 1: raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline" ) A_ = self.feature_extractor( [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="pt" ) A_ = candidate_labels A_ = [hypothesis_template.format(UpperCAmelCase ) for x in candidate_labels] A_ = self.tokenizer(UpperCAmelCase , return_tensors=self.framework , padding=UpperCAmelCase ) A_ = [text_inputs] return inputs def __A ( self : Dict , UpperCAmelCase : Any ): A_ = model_inputs.pop("candidate_labels" ) A_ = model_inputs.pop("text_inputs" ) if isinstance(text_inputs[0] , UpperCAmelCase ): A_ = text_inputs[0] else: # Batching case. A_ = text_inputs[0][0] A_ = self.model(**UpperCAmelCase , **UpperCAmelCase ) A_ = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_audio, } return model_outputs def __A ( self : str , UpperCAmelCase : List[Any] ): A_ = model_outputs.pop("candidate_labels" ) A_ = model_outputs["logits"][0] if self.framework == "pt": A_ = logits.softmax(dim=0 ) A_ = probs.tolist() else: raise ValueError("`tf` framework not supported." ) A_ = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(UpperCAmelCase , UpperCAmelCase ) , key=lambda UpperCAmelCase : -x[0] ) ] return result
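# Hedged usage sketch for the pipeline class above through the standard
# transformers pipeline API; the checkpoint id is a real public model, but the
# audio path is a placeholder.
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
result = classifier("path/to/audio.wav", candidate_labels=["dog barking", "vacuum cleaner"])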
329
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __a :int = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __a :Any = [file for file in filepaths if file != file.lower()] if upper_files: print(F"{len(upper_files)} files contain uppercase characters:") print('\n'.join(upper_files) + '\n') __a :Tuple = [file for file in filepaths if ' ' in file] if space_files: print(F"{len(space_files)} files contain space characters:") print('\n'.join(space_files) + '\n') __a :str = [file for file in filepaths if '-' in file] if hyphen_files: print(F"{len(hyphen_files)} files contain hyphen characters:") print('\n'.join(hyphen_files) + '\n') __a :List[str] = [file for file in filepaths if os.sep not in file] if nodir_files: print(F"{len(nodir_files)} files are not in a directory:") print('\n'.join(nodir_files) + '\n') __a :Any = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
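# Hedged illustration of the filename checks above on a made-up path list
# (os.sep assumed to be "/").
paths = ["dir/Good_File.py", "dir/has space.py", "dir/has-hyphen.py", "rootfile.py"]
assert [p for p in paths if p != p.lower()] == ["dir/Good_File.py"]
assert [p for p in paths if " " in p] == ["dir/has space.py"]
assert [p for p in paths if "-" in p] == ["dir/has-hyphen.py"]
assert [p for p in paths if "/" not in p] == ["rootfile.py"]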
329
1
import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" @property def __A ( self : Optional[int] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __A ( self : int ): A_ = ort.SessionOptions() A_ = False return options def __A ( self : List[str] ): A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) A_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" ) # using the PNDM scheduler by default A_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCAmelCase ) A_ = "A red cat sitting on a park bench" A_ = np.random.RandomState(0 ) A_ = pipe( prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=UpperCAmelCase , output_type="np" , ) A_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
329
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __a :Union[str, Any] = { 'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'], 'tokenization_biogpt': ['BioGptTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Optional[int] = [ 'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BioGptForCausalLM', 'BioGptForTokenClassification', 'BioGptForSequenceClassification', 'BioGptModel', 'BioGptPreTrainedModel', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
329
1
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler') class _a : """simple docstring""" def __init__( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False ): A_ = scheduler A_ = optimizers if isinstance(UpperCAmelCase , (list, tuple) ) else [optimizers] A_ = split_batches A_ = step_with_optimizer A_ = GradientState() def __A ( self : Any , *UpperCAmelCase : str , **UpperCAmelCase : Tuple ): if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*UpperCAmelCase , **UpperCAmelCase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*UpperCAmelCase , **UpperCAmelCase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step A_ = AcceleratorState().num_processes for _ in range(UpperCAmelCase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , "total_steps" ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*UpperCAmelCase , **UpperCAmelCase ) else: self.scheduler.step(*UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Optional[Any] ): return self.scheduler.get_last_lr() def __A ( self : List[Any] ): return self.scheduler.state_dict() def __A ( self : List[Any] , UpperCAmelCase : Tuple ): self.scheduler.load_state_dict(UpperCAmelCase ) def __A ( self : Dict ): return self.scheduler.get_lr() def __A ( self : List[Any] , *UpperCAmelCase : Dict , **UpperCAmelCase : List[str] ): return self.scheduler.print_lr(*UpperCAmelCase , **UpperCAmelCase )
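# Hedged sketch: wrapping a torch scheduler the way the class above (masked as
# `_a`; AcceleratedScheduler in accelerate proper) is meant to be used.
import torch
from accelerate.scheduler import AcceleratedScheduler  # accelerate's name; an assumption for this masked file

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
wrapped = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=False)
wrapped.step()
print(wrapped.get_last_lr())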
329
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ): return False return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule ) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ): """simple docstring""" A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) A_ = is_compiled_module(__UpperCamelCase ) if is_compiled: A_ = model A_ = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = model.module if not keep_fpaa_wrapper: A_ = getattr(__UpperCamelCase ,"forward" ) A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase ) if original_forward is not None: while hasattr(__UpperCamelCase ,"__wrapped__" ): A_ = forward.__wrapped__ if forward == original_forward: break A_ = forward if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ): convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase ) if is_compiled: A_ = model A_ = compiled_model return model def __snake_case ( ): """simple docstring""" PartialState().wait_for_everyone() def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ): """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(__UpperCamelCase ,__UpperCamelCase ) elif PartialState().local_process_index == 0: torch.save(__UpperCamelCase ,__UpperCamelCase ) @contextmanager def __snake_case ( **__UpperCamelCase : Any ): """simple docstring""" for key, value in kwargs.items(): A_ = str(__UpperCamelCase ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ): A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase ) if hasattr(__UpperCamelCase ,"__qualname__" ): return obj.__qualname__ if hasattr(__UpperCamelCase ,"__name__" ): return obj.__name__ return str(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ): """simple docstring""" for key, value in source.items(): if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = destination.setdefault(__UpperCamelCase ,{} ) merge_dicts(__UpperCamelCase ,__UpperCamelCase ) else: A_ = value return destination def __snake_case ( __UpperCamelCase : int = None ): """simple docstring""" if port is None: A_ = 2_9500 with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s: return s.connect_ex(("localhost", port) ) == 0
329
1
import os def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" A_ = len(grid[0] ) A_ = len(__UpperCamelCase ) A_ = 0 A_ = 0 A_ = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(__UpperCamelCase ): for j in range(n_rows - 3 ): A_ = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] A_ = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: A_ = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: A_ = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) A_ = max( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) if max_product > largest: A_ = max_product return largest def __snake_case ( ): """simple docstring""" A_ = [] with open(os.path.dirname(__UpperCamelCase ) + "/grid.txt" ) as file: for line in file: grid.append(line.strip("\n" ).split(" " ) ) A_ = [[int(__UpperCamelCase ) for i in grid[j]] for j in range(len(__UpperCamelCase ) )] return largest_product(__UpperCamelCase ) if __name__ == "__main__": print(solution())
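# Hedged check of the grid-product search above on a toy 4x4 grid whose best
# run is the main diagonal 1*2*3*4 = 24 (the helper's name is masked;
# `largest_product` is assumed here).
grid = [
    [1, 0, 0, 0],
    [0, 2, 0, 0],
    [0, 0, 3, 0],
    [0, 0, 0, 4],
]
assert largest_product(grid) == 24  # noqa: F821 - illustrative name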
329
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : int ): A_ = tempfile.mkdtemp() A_ = BlipImageProcessor() A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer def __A ( self : Optional[Any] , **UpperCAmelCase : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor def __A ( self : Any ): shutil.rmtree(self.tmpdirname ) def __A ( self : Dict ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Any ): A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) A_ = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : Dict ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : int ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = processor(text=UpperCAmelCase ) A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : Any ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = 
processor.batch_decode(UpperCAmelCase ) A_ = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
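# Hedged sketch of the processor round trip the tests above cover; the hub id
# is an assumption.
import numpy as np
from PIL import Image
from transformers import BlipProcessor

blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = blip_processor(text="lower newer", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']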
329
1
__a :Optional[Any] = range(2, 20 + 1) __a :Optional[int] = [10**k for k in range(ks[-1] + 1)] __a :dict[int, dict[int, list[list[int]]]] = {} def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = sum(a_i[j] for j in range(__UpperCamelCase ,len(__UpperCamelCase ) ) ) A_ = sum(a_i[j] * base[j] for j in range(min(len(__UpperCamelCase ) ,__UpperCamelCase ) ) ) A_ , A_ = 0, 0 A_ = n - i A_ = memo.get(__UpperCamelCase ) if sub_memo is not None: A_ = sub_memo.get(__UpperCamelCase ) if jumps is not None and len(__UpperCamelCase ) > 0: # find and make the largest jump without going over A_ = -1 for _k in range(len(__UpperCamelCase ) - 1 ,-1 ,-1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: A_ = _k break if max_jump >= 0: A_ , A_ , A_ = jumps[max_jump] # since the difference between jumps is cached, add c A_ = diff + c for j in range(min(__UpperCamelCase ,len(__UpperCamelCase ) ) ): A_ , A_ = divmod(__UpperCamelCase ,10 ) if new_c > 0: add(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) else: A_ = [] else: A_ = {c: []} A_ = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps A_ , A_ = next_term(__UpperCamelCase ,k - 1 ,i + dn ,__UpperCamelCase ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead A_ , A_ = compute(__UpperCamelCase ,__UpperCamelCase ,i + dn ,__UpperCamelCase ) diff += _diff dn += terms_jumped A_ = sub_memo[c] # keep jumps sorted by # of terms skipped A_ = 0 while j < len(__UpperCamelCase ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(__UpperCamelCase ,(diff, dn, k) ) return (diff, dn) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Dict ,__UpperCamelCase : Optional[Any] ): """simple docstring""" if i >= n: return 0, i if k > len(__UpperCamelCase ): a_i.extend([0 for _ in range(k - len(__UpperCamelCase ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) A_ = i A_ , A_ , A_ = 0, 0, 0 for j in range(len(__UpperCamelCase ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 A_ = ds_c + ds_b diff += addend A_ = 0 for j in range(__UpperCamelCase ): A_ = a_i[j] + addend A_ , A_ = divmod(__UpperCamelCase ,10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) return diff, i - start_i def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ): """simple docstring""" for j in range(__UpperCamelCase ,len(__UpperCamelCase ) ): A_ = digits[j] + addend if s >= 10: A_ , A_ = divmod(__UpperCamelCase ,10 ) A_ = addend // 10 + quotient else: A_ = s A_ = addend // 10 if addend == 0: break while addend > 0: A_ , A_ = divmod(__UpperCamelCase ,10 ) digits.append(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : int = 10**15 ): """simple docstring""" A_ = [1] A_ = 1 A_ = 0 while True: A_ , A_ = next_term(__UpperCamelCase ,20 ,i + dn ,__UpperCamelCase ) dn += terms_jumped if dn == n - i: break A_ = 0 for j in range(len(__UpperCamelCase ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F"{solution() = }")
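# Hedged note: the memoised machinery above accelerates the plain recurrence
# "each term adds its own digit sum to itself, starting at 1" (Project Euler
# 551); a slow reference version for sanity-checking small n could look like
# this.
def slow_a(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

assert [slow_a(k) for k in range(1, 6)] == [1, 2, 4, 8, 16]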
329
import math __a :Union[str, Any] = 10 __a :Union[str, Any] = 7 __a :int = BALLS_PER_COLOUR * NUM_COLOURS def __snake_case ( __UpperCamelCase : int = 20 ): """simple docstring""" A_ = math.comb(__UpperCamelCase ,__UpperCamelCase ) A_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__UpperCamelCase ) A_ = NUM_COLOURS * (1 - missing_colour / total) return f'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
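# Hedged note: the closed form above is linearity of expectation over the 7
# colours; C(60, 20) / C(70, 20) is the probability that a fixed colour's 10
# balls are all missed in a 20-ball draw.
from math import comb

expected = 7 * (1 - comb(60, 20) / comb(70, 20))
assert abs(expected - 6.818741802) < 1e-8  # the published Project Euler 493 answer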
329
1
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = 'new-model' if is_tf_available(): class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Any = NewModelConfig @require_tf class _a ( unittest.TestCase ): """simple docstring""" @slow def __A ( self : Dict ): A_ = "bert-base-cased" A_ = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = TFAutoModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) @slow def __A ( self : Optional[Any] ): A_ = "bert-base-cased" A_ = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = TFAutoModelForPreTraining.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) @slow def __A ( self : List[str] ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase ) A_ , A_ = TFAutoModelForCausalLM.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) @slow def __A ( self : str ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) @slow def __A ( self : Dict ): for 
model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase ) A_ , A_ = TFAutoModelForMaskedLM.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) @slow def __A ( self : List[str] ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase ) A_ , A_ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCAmelCase , output_loading_info=UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) @slow def __A ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: A_ = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = TFAutoModelForSequenceClassification.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) @slow def __A ( self : Dict ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: A_ = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = TFAutoModelForQuestionAnswering.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) @slow @require_tensorflow_probability def __A ( self : int ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: A_ = AutoConfig.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCAmelCase ) A_ , A_ = TFAutoModelForTableQuestionAnswering.from_pretrained( UpperCAmelCase , output_loading_info=UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Dict ): A_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase ) , 14410 ) def __A ( self : Dict ): A_ = TFAutoModelWithLMHead.from_pretrained(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=UpperCAmelCase ) , 14410 ) def __A ( self : List[str] ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel A_ = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) A_ = copy.deepcopy(model.config ) A_ = ["FunnelBaseModel"] A_ = TFAutoModel.from_config(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCAmelCase ) A_ = TFAutoModel.from_pretrained(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) def __A ( self : 
Any ): try: AutoConfig.register("new-model" , UpperCAmelCase ) A_ = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(UpperCAmelCase ): auto_class.register(UpperCAmelCase , UpperCAmelCase ) auto_class.register(UpperCAmelCase , UpperCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCAmelCase ): auto_class.register(UpperCAmelCase , UpperCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API A_ = BertModelTester(self ).get_config() A_ = NewModelConfig(**tiny_config.to_dict() ) A_ = auto_class.from_config(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCAmelCase ) A_ = auto_class.from_pretrained(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def __A ( self : str ): with self.assertRaisesRegex( UpperCAmelCase , "bert-base is not a local folder and is not a valid model identifier" ): A_ = TFAutoModel.from_pretrained("bert-base" ) def __A ( self : List[str] ): with self.assertRaisesRegex( UpperCAmelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): A_ = TFAutoModel.from_pretrained(UpperCAmelCase , revision="aaaaaa" ) def __A ( self : int ): with self.assertRaisesRegex( UpperCAmelCase , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ): A_ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" ) def __A ( self : List[Any] ): with self.assertRaisesRegex(UpperCAmelCase , "Use `from_pt=True` to load this model" ): A_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" ) def __A ( self : Tuple ): # Make sure we have cached the model. A_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) with RequestCounter() as counter: A_ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint A_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) with RequestCounter() as counter: A_ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
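# Hedged sketch of the auto-API the tests above exercise; downloads a small
# public checkpoint.
from transformers import AutoConfig, TFAutoModel

config = AutoConfig.from_pretrained("bert-base-cased")
model = TFAutoModel.from_pretrained("bert-base-cased")
print(type(model).__name__)  # TFBertModel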
329
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __a :Optional[Any] = logging.get_logger(__name__) __a :Any = {'vocab_file': 'vocab.txt'} __a :Any = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __a :List[str] = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __a :List[str] = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = VOCAB_FILES_NAMES _lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION _lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : Union[str, Any] = ConvBertTokenizer def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ): super().__init__( UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , ) A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars ): A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) ) A_ = do_lower_case A_ = strip_accents A_ = tokenize_chinese_chars A_ = normalizer_class(**UpperCAmelCase ) A_ = do_lower_case def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ): A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase ) return tuple(UpperCAmelCase )
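# Hedged usage sketch for the fast tokenizer above.
from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
print(tokenizer("lower newer").input_ids)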
329
1
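A self-contained sketch of the token-type-id layout computed by the tokenizer above: segment A (plus [CLS]/[SEP]) maps to 0, segment B (plus its trailing [SEP]) maps to 1. The ids 101 and 102 are placeholder special-token values, not ConvBERT's actual vocabulary ids:

CLS_ID, SEP_ID = 101, 102  # placeholder [CLS]/[SEP] ids

def token_type_ids(ids_a, ids_b=None):
    cls, sep = [CLS_ID], [SEP_ID]
    if ids_b is None:
        return [0] * len(cls + ids_a + sep)
    return [0] * len(cls + ids_a + sep) + [1] * len(ids_b + sep)

assert token_type_ids([7, 8]) == [0, 0, 0, 0]
assert token_type_ids([7, 8], [9]) == [0, 0, 0, 0, 1, 1]  # segment B maps to 1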
import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" A_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__UpperCamelCase ,__UpperCamelCase ) def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: A_ = s_dict.pop(__UpperCamelCase ) elif "subsample" in key: A_ = s_dict.pop(__UpperCamelCase ) def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ , A_ = emb.weight.shape A_ = nn.Linear(__UpperCamelCase ,__UpperCamelCase ,bias=__UpperCamelCase ) A_ = emb.weight.data return lin_layer def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Optional[int] ): """simple docstring""" A_ = torch.load(__UpperCamelCase ,map_location="cpu" ) A_ = mam_aaa["args"] A_ = mam_aaa["model"] A_ = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(__UpperCamelCase ) rename_keys(__UpperCamelCase ) A_ = state_dict["decoder.embed_tokens.weight"].shape[0] A_ = args.share_decoder_input_output_embed A_ = [int(__UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )] A_ = SpeechaTextConfig( vocab_size=__UpperCamelCase ,max_source_positions=args.max_source_positions ,max_target_positions=args.max_target_positions ,encoder_layers=args.encoder_layers ,decoder_layers=args.decoder_layers ,encoder_attention_heads=args.encoder_attention_heads ,decoder_attention_heads=args.decoder_attention_heads ,encoder_ffn_dim=args.encoder_ffn_embed_dim ,decoder_ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.encoder_embed_dim ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function="relu" ,num_conv_layers=len(__UpperCamelCase ) ,conv_channels=args.conv_channels ,conv_kernel_sizes=__UpperCamelCase ,input_feat_per_channel=args.input_feat_per_channel ,input_channels=args.input_channels ,tie_word_embeddings=__UpperCamelCase ,num_beams=5 ,max_length=200 ,use_cache=__UpperCamelCase ,decoder_start_token_id=2 ,early_stopping=__UpperCamelCase ,) A_ = SpeechaTextForConditionalGeneration(__UpperCamelCase ) A_ , A_ = model.model.load_state_dict(__UpperCamelCase ,strict=__UpperCamelCase ) if len(__UpperCamelCase ) > 0 and not set(__UpperCamelCase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," f''' but all the following weights are missing {missing}''' ) if tie_embeds: A_ = make_linear_from_emb(model.model.decoder.embed_tokens ) else: A_ = lm_head_weights model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') __a :Dict = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
329
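The embedding-to-linear weight-tying trick from the conversion script above, in isolation: an output projection that shares its parameters with the decoder embedding (toy sizes, requires torch):

import torch
from torch import nn

emb = nn.Embedding(10, 4)                      # vocab_size=10, d_model=4
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data          # tie to the embedding matrix

print(lm_head(torch.randn(2, 4)).shape)        # torch.Size([2, 10])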
import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor __a :Optional[Any] = logging.get_logger(__name__) class _a ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase_ : Optional[int] ): warnings.warn( "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use VideoMAEImageProcessor instead." , FutureWarning , ) super().__init__(*UpperCAmelCase , **UpperCAmelCase_ )
329
1
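The same deprecation-shim pattern in miniature: the old class name keeps working, warns on construction, and inherits all behaviour from its replacement:

import warnings

class NewProcessor:
    def __call__(self, x):
        return x * 2

class OldFeatureExtractor(NewProcessor):       # deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn("OldFeatureExtractor is deprecated; use NewProcessor.", FutureWarning)
        super().__init__(*args, **kwargs)

print(OldFeatureExtractor()(3))                # 6, after a FutureWarning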
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch __a :Dict = logging.get_logger(__name__) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : List[str] = ['pixel_values'] def __init__( self : int , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , **UpperCAmelCase : Dict , ): super().__init__(**UpperCAmelCase ) A_ = size if size is not None else {"shortest_edge": 224} A_ = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) A_ = crop_size if crop_size is not None else {"height": 256, "width": 256} A_ = get_size_dict(UpperCAmelCase , param_name="crop_size" ) A_ = do_resize A_ = size A_ = resample A_ = do_rescale A_ = rescale_factor A_ = do_center_crop A_ = crop_size A_ = do_flip_channel_order def __A ( self : str , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PIL.Image.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[Any] , ): A_ = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' ) A_ = get_resize_output_image_size(UpperCAmelCase , size=size["shortest_edge"] , default_to_square=UpperCAmelCase ) return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Tuple , ): A_ = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}''' ) return center_crop(UpperCAmelCase , size=(size["height"], size["width"]) , data_format=UpperCAmelCase , **UpperCAmelCase ) def __A ( self : str , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ): return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def __A ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None ): return flip_channel_order(UpperCAmelCase , data_format=UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Optional[Any] , ): A_ = do_resize if do_resize is not None else self.do_resize A_ = resample if resample is not None else self.resample A_ = do_rescale if do_rescale is not None else self.do_rescale A_ = rescale_factor if rescale_factor is not None else self.rescale_factor A_ = do_center_crop if do_center_crop is not None else self.do_center_crop A_ = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) A_ = size if size is not None else self.size A_ = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) A_ = crop_size if crop_size is not None else self.crop_size A_ = get_size_dict(UpperCAmelCase , param_name="crop_size" ) A_ = make_list_of_images(UpperCAmelCase ) if not valid_images(UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) # All transformations expect numpy arrays. 
A_ = [to_numpy_array(UpperCAmelCase ) for image in images] if do_resize: A_ = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images] if do_center_crop: A_ = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images] if do_rescale: A_ = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: A_ = [self.flip_channel_order(image=UpperCAmelCase ) for image in images] A_ = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images] A_ = {"pixel_values": images} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : List[Tuple] = None ): A_ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCAmelCase ) != len(UpperCAmelCase ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(UpperCAmelCase ): A_ = target_sizes.numpy() A_ = [] for idx in range(len(UpperCAmelCase ) ): A_ = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=UpperCAmelCase ) A_ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCAmelCase ) else: A_ = logits.argmax(dim=1 ) A_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
329
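The BGR/RGB channel flip applied by the processor above, shown on a bare numpy array (channels-last layout assumed for this illustration):

import numpy as np

img = np.zeros((2, 2, 3), dtype=np.uint8)
img[..., 0] = 255                  # pure red in RGB order
flipped = img[..., ::-1]           # reverse the channel axis -> BGR
print(img[0, 0], flipped[0, 0])    # [255 0 0] [0 0 255]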
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _a : """simple docstring""" @staticmethod def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ): pass @is_pipeline_test @require_vision class _a ( unittest.TestCase ): """simple docstring""" @require_torch def __A ( self : List[str] ): A_ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(UpperCAmelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @require_tf def __A ( self : int ): A_ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @slow @require_torch def __A ( self : Any ): A_ = pipeline( 
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def __A ( self : Optional[Any] ): A_ = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
329
1
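Plain usage of the pipeline under test, outside the test harness (downloads the public CLIP checkpoint on first run; the image path matches the fixture used above):

from PIL import Image
from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
print(classifier(image, candidate_labels=["cat", "plane", "remote"]))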
import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def __snake_case ( *__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[Union[Dict, Any]] = None ,__UpperCamelCase : Optional[Any]=True ,__UpperCamelCase : Optional[Any]=2 ): """simple docstring""" from .. import __version__ A_ = take_from A_ = () if not isinstance(args[0] ,__UpperCamelCase ): A_ = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse(__UpperCamelCase ): raise ValueError( f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\'''' f''' version {__version__} is >= {version_name}''' ) A_ = None if isinstance(__UpperCamelCase ,__UpperCamelCase ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__UpperCamelCase ),) A_ = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.''' elif hasattr(__UpperCamelCase ,__UpperCamelCase ): values += (getattr(__UpperCamelCase ,__UpperCamelCase ),) A_ = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.''' elif deprecated_kwargs is None: A_ = f'''`{attribute}` is deprecated and will be removed in version {version_name}.''' if warning is not None: A_ = warning + " " if standard_warn else "" warnings.warn(warning + message ,__UpperCamelCase ,stacklevel=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) and len(__UpperCamelCase ) > 0: A_ = inspect.getouterframes(inspect.currentframe() )[1] A_ = call_frame.filename A_ = call_frame.lineno A_ = call_frame.function A_ , A_ = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' ) if len(__UpperCamelCase ) == 0: return elif len(__UpperCamelCase ) == 1: return values[0] return values
329
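Upstream this helper is exported as `deprecate`; with that name restored, a typical call pops a deprecated keyword, emits the warning, and returns the old value (the names `steps`/`num_steps` are illustrative):

kwargs = {"steps": 10}
steps = deprecate("steps", "2.0.0", "Use `num_steps` instead.", take_from=kwargs)
print(steps, kwargs)  # 10 {} -- value popped, warning emitted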
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ): """simple docstring""" A_ = [] for _ in range(__UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ): """simple docstring""" A_ = [] for step in range(__UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A_ = os.path.join(__UpperCamelCase ,"schedule.bin" ) torch.save(scheduler.state_dict() ,__UpperCamelCase ) A_ = torch.load(__UpperCamelCase ) scheduler.load_state_dict(__UpperCamelCase ) return lrs @require_torch class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ): self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ): self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase ) A_ = torch.tensor([0.4, 0.2, -0.5] ) A_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): A_ = criterion(UpperCAmelCase , UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def __A ( self : Dict ): A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase ) A_ = torch.tensor([0.4, 0.2, -0.5] ) A_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A_ = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , ) for _ in range(1000 ): A_ = criterion(UpperCAmelCase , UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None _lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _lowerCamelCase : Any = 1_0 def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ): self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ): self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A_ = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A_ , A_ = data A_ = scheduler_func(self.optimizer , **UpperCAmelCase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A_ = unwrap_schedule(UpperCAmelCase , self.num_steps ) self.assertListAlmostEqual( UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) A_ = scheduler_func(self.optimizer , **UpperCAmelCase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' ) class _a : """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : List[str] ): A_ = fn def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ): return self.fn(*UpperCAmelCase , **UpperCAmelCase ) @classmethod def __A ( self : Dict , UpperCAmelCase : List[str] ): A_ = list(map(self , scheduler.lr_lambdas ) )
329
1
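The linear warmup/decay row from the expectations table above, reproduced by hand to make the schedule concrete (base lr 10.0, 2 warmup steps, 10 total steps):

def linear_lr(step, base=10.0, warmup=2, total=10):
    if step < warmup:
        return base * step / warmup                             # ramp 0 -> base
    return base * max(0.0, (total - step) / (total - warmup))   # linear decay to 0

print([round(linear_lr(s), 2) for s in range(10)])
# [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]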
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _a : """simple docstring""" @staticmethod def __A ( *UpperCAmelCase : Tuple , **UpperCAmelCase : Any ): pass @is_pipeline_test @require_torch @require_vision class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def __A ( self : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ): A_ = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) A_ = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def __A ( self : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Dict ): A_ = vqa_pipeline(UpperCAmelCase , top_k=1 ) self.assertEqual( UpperCAmelCase , [ [{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}], [{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}], ] , ) @require_torch def __A ( self : Union[str, Any] ): A_ = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) A_ = "./tests/fixtures/tests_samples/COCO/000000039769.png" A_ = "How many cats are there?" A_ = vqa_pipeline(image=UpperCAmelCase , question="How many cats are there?" , top_k=2 ) self.assertEqual( UpperCAmelCase , [{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}, {"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}] ) A_ = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( UpperCAmelCase , [{"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}, {"score": ANY(UpperCAmelCase ), "answer": ANY(UpperCAmelCase )}] ) @slow @require_torch def __A ( self : Optional[Any] ): A_ = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" ) A_ = "./tests/fixtures/tests_samples/COCO/000000039769.png" A_ = "How many cats are there?" A_ = vqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) A_ = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) A_ = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase , decimals=4 ) , [[{"score": 0.8_799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , ) @require_tf @unittest.skip("Visual question answering not implemented in TF" ) def __A ( self : Tuple ): pass
329
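The same pipeline outside the test, in its simplest form (model download required; the path matches the fixture above, and the scores echo the test's expectations):

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
print(vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
          question="How many cats are there?", top_k=2))
# [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]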
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing """simple docstring""" return x.sum() def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing """simple docstring""" return i + 1 @dataclass class _a : """simple docstring""" _lowerCamelCase : int _lowerCamelCase : str class _a ( snake_case_ ): """simple docstring""" def __A ( self : Dict ): A_ = {} A_ = [] A_ = 1 A_ = [1, 2] A_ = {"a": 1, "b": 2} A_ = {"a": [1, 2], "b": [3, 4]} A_ = {"a": {"1": 1}, "b": 2} A_ = {"a": 1, "b": 2, "c": 3, "d": 4} A_ = {} A_ = [] A_ = 2 A_ = [2, 3] A_ = {"a": 2, "b": 3} A_ = {"a": [2, 3], "b": [4, 5]} A_ = {"a": {"1": 2}, "b": 3} A_ = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) A_ = 2 self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} A_ = {"a": 2, "b": 0, "c": 2} A_ = { "a": np.eye(2 ).astype(UpperCAmelCase ), "b": np.zeros(3 ).astype(UpperCAmelCase ), "c": np.ones(2 ).astype(UpperCAmelCase ), } self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(UpperCAmelCase ): # can't pickle 
a local lambda map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase ) def __A ( self : List[str] ): A_ = {"a": 1, "b": 2} A_ = {"a": 3, "b": 4} A_ = {"a": 5, "b": 6} A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase ) def __A ( self : Any ): class _a : """simple docstring""" _lowerCamelCase : int = 'bar' A_ = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" ,[ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] ,) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ): """simple docstring""" with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: A_ = {f'''{i}''': i for i in range(__UpperCamelCase )} A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class _a ( snake_case_ ): """simple docstring""" @require_tf def __A ( self : Union[str, Any] ): import tensorflow as tf from tensorflow.keras import layers A_ = layers.Dense(2 ) def gen_random_output(): A_ = tf.random.uniform((1, 3) ) return model(UpperCAmelCase ).numpy() with temp_seed(42 , set_tensorflow=UpperCAmelCase ): A_ = gen_random_output() with temp_seed(42 , set_tensorflow=UpperCAmelCase ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __A ( self : Optional[int] ): import torch def gen_random_output(): A_ = torch.nn.Linear(3 , 2 ) A_ = torch.rand(1 , 3 ) return model(UpperCAmelCase ).detach().numpy() with temp_seed(42 , set_pytorch=UpperCAmelCase ): A_ = gen_random_output() with temp_seed(42 , set_pytorch=UpperCAmelCase ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __A ( self : Any ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): A_ = gen_random_output() with temp_seed(42 ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data" ,[{}] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = NestedDataStructure(__UpperCamelCase ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" ,[ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], 
[4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] ,) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ): """simple docstring""" A_ = NestedDataStructure(__UpperCamelCase ).flatten() assert output == expected_output def __snake_case ( ): """simple docstring""" A_ = A(x=1 ,y="foobar" ) A_ = {"x": 1, "y": "foobar"} assert asdict(__UpperCamelCase ) == expected_output A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]} A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(__UpperCamelCase ) == expected_output with pytest.raises(__UpperCamelCase ): asdict([1, A(x=10 ,y="foo" )] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" return text.split() def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def __snake_case ( ): """simple docstring""" with Pool(2 ) as pool: A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(__UpperCamelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(__UpperCamelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: A_ = [] for yield_time, content in iflatmap_unordered( __UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(__UpperCamelCase ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(__UpperCamelCase ) == 4
329
1
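A toy re-implementation of the nested-map behaviour the tests above assert (dicts and sequences recursed, leaves transformed); the real `map_nested` additionally handles numpy arrays and multiprocessing:

def map_nested_toy(fn, data):
    if isinstance(data, dict):
        return {k: map_nested_toy(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return [map_nested_toy(fn, v) for v in data]
    return fn(data)

assert map_nested_toy(lambda x: x + 1, {"a": [1, 2], "b": {"1": 1}}) == {"a": [2, 3], "b": {"1": 2}}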
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType __a , __a , __a :Union[str, Any] = False, False, False @dataclass class _a : """simple docstring""" _lowerCamelCase : Optional[int] = None _lowerCamelCase : bool = True _lowerCamelCase : bool = True _lowerCamelCase : Optional[str] = None # Automatically constructed _lowerCamelCase : ClassVar[str] = "dict" _lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} ) _lowerCamelCase : str = field(default='Audio' , init=snake_case_ , repr=snake_case_ ) def __call__( self : List[str] ): return self.pa_type def __A ( self : Optional[Any] , UpperCAmelCase : Union[str, bytes, dict] ): try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install 'soundfile'." ) from err if isinstance(UpperCAmelCase , UpperCAmelCase ): return {"bytes": None, "path": value} elif isinstance(UpperCAmelCase , UpperCAmelCase ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes A_ = BytesIO() sf.write(UpperCAmelCase , value["array"] , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) A_ = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767 else: A_ = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767 A_ = BytesIO(bytes() ) sf.write(UpperCAmelCase , UpperCAmelCase , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def __A ( self : Optional[int] , UpperCAmelCase : dict , UpperCAmelCase : Optional[Dict[str, Union[str, bool, None]]] = None ): if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." 
) A_ , A_ = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'." ) from err A_ = xsplitext(UpperCAmelCase )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: A_ = token_per_repo_id or {} A_ = path.split("::" )[-1] try: A_ = string_to_dict(UpperCAmelCase , config.HUB_DATASETS_URL )["repo_id"] A_ = token_per_repo_id[repo_id] except (ValueError, KeyError): A_ = None with xopen(UpperCAmelCase , "rb" , use_auth_token=UpperCAmelCase ) as f: A_ , A_ = sf.read(UpperCAmelCase ) else: A_ , A_ = sf.read(UpperCAmelCase ) A_ = array.T if self.mono: A_ = librosa.to_mono(UpperCAmelCase ) if self.sampling_rate and self.sampling_rate != sampling_rate: A_ = librosa.resample(UpperCAmelCase , orig_sr=UpperCAmelCase , target_sr=self.sampling_rate ) A_ = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def __A ( self : Optional[int] ): from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." 
) return { "bytes": Value("binary" ), "path": Value("string" ), } def __A ( self : Optional[Any] , UpperCAmelCase : Union[pa.StringArray, pa.StructArray] ): if pa.types.is_string(storage.type ): A_ = pa.array([None] * len(UpperCAmelCase ) , type=pa.binary() ) A_ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): A_ = pa.array([None] * len(UpperCAmelCase ) , type=pa.string() ) A_ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): A_ = pa.array([Audio().encode_example(UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: A_ = storage.field("bytes" ) else: A_ = pa.array([None] * len(UpperCAmelCase ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: A_ = storage.field("path" ) else: A_ = pa.array([None] * len(UpperCAmelCase ) , type=pa.string() ) A_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) return array_cast(UpperCAmelCase , self.pa_type ) def __A ( self : Tuple , UpperCAmelCase : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(UpperCAmelCase : str ): with xopen(UpperCAmelCase , "rb" ) as f: A_ = f.read() return bytes_ A_ = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) A_ = pa.array( [os.path.basename(UpperCAmelCase ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) A_ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(UpperCAmelCase , self.pa_type )
329
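How the Audio feature above is normally reached from user code (requires soundfile/librosa; the file name is illustrative):

from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["sample.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
# ds[0]["audio"] -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}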
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def __snake_case ( __UpperCamelCase : str ): """simple docstring""" for char in word: A_ = ord(__UpperCamelCase ) if not _is_chinese_char(__UpperCamelCase ): return 0 return 1 def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ = set() for token in tokens: A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase ) if chinese_word: word_set.add(__UpperCamelCase ) A_ = list(__UpperCamelCase ) return word_list def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] ) A_ = bert_tokens A_ , A_ = 0, len(__UpperCamelCase ) while start < end: A_ = True if is_chinese(bert_word[start] ): A_ = min(end - start ,__UpperCamelCase ) for i in range(__UpperCamelCase ,1 ,-1 ): A_ = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 ,start + i ): A_ = "##" + bert_word[j] A_ = start + i A_ = False break if single_word: start += 1 return bert_word def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ): """simple docstring""" A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0] A_ = [get_chinese_word(__UpperCamelCase ) for r in res] ltp_res.extend(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ): A_ = [] for id in input_ids: A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase ) input_tokens.append(__UpperCamelCase ) A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase ) A_ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__UpperCamelCase ): if token[:2] == "##": A_ = token[2:] # save chinese tokens' pos if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ): ref_id.append(__UpperCamelCase ) ref_ids.append(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) return ref_ids def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" with open(args.file_name ,"r" ,encoding="utf-8" ) as f: A_ = f.readlines() A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ = LTP(args.ltp ) # faster in GPU device A_ = BertTokenizer.from_pretrained(args.bert ) A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) with open(args.save_path ,"w" ,encoding="utf-8" ) as f: A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids] f.writelines(__UpperCamelCase ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path' ) parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer') parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res') __a :Dict = parser.parse_args() main(args)
329
1
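The CJK-codepoint test from the script above as a standalone check (main BMP ranges only; the script also covers the supplementary-plane extensions):

def is_cjk_char(ch):
    cp = ord(ch)
    return (0x4E00 <= cp <= 0x9FFF) or (0x3400 <= cp <= 0x4DBF) or (0xF900 <= cp <= 0xFAFF)

print([is_cjk_char(c) for c in "中a文"])  # [True, False, True]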
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Dict = VQModel _lowerCamelCase : Union[str, Any] = 'sample' @property def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=(32, 32) ): A_ = 4 A_ = 3 A_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase ) return {"sample": image} @property def __A ( self : List[str] ): return (3, 32, 32) @property def __A ( self : List[str] ): return (3, 32, 32) def __A ( self : Dict ): A_ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } A_ = self.dummy_input return init_dict, inputs_dict def __A ( self : Dict ): pass def __A ( self : List[Any] ): pass def __A ( self : Any ): A_ , A_ = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase ) A_ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def __A ( self : Union[str, Any] ): A_ = VQModel.from_pretrained("fusing/vqgan-dummy" ) model.to(UpperCAmelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) A_ = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) A_ = image.to(UpperCAmelCase ) with torch.no_grad(): A_ = model(UpperCAmelCase ).sample A_ = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off A_ = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] ) # fmt: on self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1E-3 ) )
329
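Running the autoencoder under test at shape level, with randomly initialised weights and the same toy config the test builds:

import torch
from diffusers import VQModel

model = VQModel(in_channels=3, out_channels=3, latent_channels=3,
                block_out_channels=[32, 64],
                down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
                up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"])
out = model(torch.randn(1, 3, 32, 32)).sample
print(out.shape)  # torch.Size([1, 3, 32, 32])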
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def __snake_case ( __UpperCamelCase : Features ): """simple docstring""" A_ = np.inf def set_batch_size(__UpperCamelCase : FeatureType ) -> None: nonlocal batch_size if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary": A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__UpperCamelCase ,__UpperCamelCase ) return None if batch_size is np.inf else batch_size class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ): super().__init__( UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , ) A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths} A_ = _PACKAGED_DATASETS_MODULES["parquet"][1] A_ = Parquet( cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , ) def __A ( self : Optional[Any] ): # Build iterable dataset if self.streaming: A_ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A_ = None A_ = None A_ = None A_ = None self.builder.download_and_prepare( download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , ) A_ = self.builder.as_dataset( split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ): A_ = dataset A_ = path_or_buf A_ = batch_size or get_writer_batch_size(dataset.features ) A_ = parquet_writer_kwargs def __A ( self : int ): A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , "wb+" ) as buffer: A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs ) else: A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs ) return written def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ): A_ = 0 A_ = parquet_writer_kwargs.pop("path_or_buf" , 
UpperCAmelCase ) A_ = self.dataset.features.arrow_schema A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ): A_ = query_table( table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(UpperCAmelCase ) written += batch.nbytes writer.close() return written
329
1
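A round trip through the reader/writer pair above via the public datasets API (file name illustrative):

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_parquet("demo.parquet")                    # backed by the writer class
reloaded = Dataset.from_parquet("demo.parquet")  # backed by the reader class
print(reloaded[0])                               # {'text': 'a', 'label': 0}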
from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __a :int = [ 'python', 'tqdm', 'regex', 'requests', 'packaging', 'filelock', 'numpy', 'tokenizers', 'huggingface-hub', 'safetensors', 'accelerate', 'pyyaml', ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[Any]=None ): """simple docstring""" require_version(deps[pkg] ,__UpperCamelCase )
329
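What a single runtime check boils down to: `require_version` raises with the hint appended when the installed version does not satisfy the pin (requirement string illustrative):

from transformers.utils.versions import require_version

require_version("tqdm>=4.27", "Try: pip install -U tqdm")  # raises if the pin is not met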
from __future__ import annotations def __snake_case ( __UpperCamelCase : int = 4 ): """simple docstring""" A_ = abs(__UpperCamelCase ) or 4 return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )] def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" return reverse_row(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_column(matrix)) def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" return reverse_row(reverse_column(__UpperCamelCase ) ) # OR.. reverse_column(reverse_row(matrix)) def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" return reverse_column(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_row(matrix)) def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" A_ = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )] return matrix def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" A_ = matrix[::-1] return matrix def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" A_ = [x[::-1] for x in matrix] return matrix def __snake_case ( __UpperCamelCase : list[list[int]] ): """simple docstring""" for i in matrix: print(*__UpperCamelCase ) if __name__ == "__main__": __a :Any = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 90 counterclockwise:\n') print_matrix(rotate_aa(matrix)) __a :Any = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 180:\n') print_matrix(rotate_aaa(matrix)) __a :Any = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 270 counterclockwise:\n') print_matrix(rotate_aaa(matrix))
329
1
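A quick concrete check of the first identity in the snippet above (rotating 90 degrees counterclockwise equals reverse_row(transpose(m))):

m = [[1, 2], [3, 4]]
transposed = [list(col) for col in zip(*m)]  # [[1, 3], [2, 4]]
rot90_ccw = transposed[::-1]                 # reverse_row -> [[2, 4], [1, 3]]
print(rot90_ccw)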
import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline __a :List[Any] = datasets.utils.logging.get_logger(__name__) @dataclass class _a ( datasets.BuilderConfig ): """simple docstring""" _lowerCamelCase : Optional[datasets.Features] = None _lowerCamelCase : str = "utf-8" _lowerCamelCase : Optional[str] = None _lowerCamelCase : Optional[str] = None _lowerCamelCase : bool = True # deprecated _lowerCamelCase : Optional[int] = None # deprecated _lowerCamelCase : int = 1_0 << 2_0 # 10MB _lowerCamelCase : Optional[bool] = None class _a ( datasets.ArrowBasedBuilder ): """simple docstring""" _lowerCamelCase : Union[str, Any] = JsonConfig def __A ( self : Optional[Any] ): if self.config.block_size is not None: logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" ) A_ = self.config.block_size if self.config.use_threads is not True: logger.warning( "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." ) if self.config.newlines_in_values is not None: raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" ) return datasets.DatasetInfo(features=self.config.features ) def __A ( self : Dict , UpperCAmelCase : Tuple ): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) A_ = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCAmelCase , (str, list, tuple) ): A_ = data_files if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = [files] A_ = [dl_manager.iter_files(UpperCAmelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] A_ = [] for split_name, files in data_files.items(): if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = [files] A_ = [dl_manager.iter_files(UpperCAmelCase ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCAmelCase , gen_kwargs={"files": files} ) ) return splits def __A ( self : Optional[Any] , UpperCAmelCase : pa.Table ): if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): A_ = self.config.features.arrow_schema.field(UpperCAmelCase ).type A_ = pa_table.append_column(UpperCAmelCase , pa.array([None] * len(UpperCAmelCase ) , type=UpperCAmelCase ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example A_ = table_cast(UpperCAmelCase , self.config.features.arrow_schema ) return pa_table def __A ( self : Optional[Any] , UpperCAmelCase : List[str] ): for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ = json.load(UpperCAmelCase ) # We keep only the field we are interested in A_ = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(UpperCAmelCase , (list, tuple) ): A_ = set().union(*[row.keys() for row in dataset] ) A_ = {col: [row.get(UpperCAmelCase ) for row in dataset] for col in keys} else: A_ = dataset 
A_ = pa.Table.from_pydict(UpperCAmelCase ) yield file_idx, self._cast_table(UpperCAmelCase ) # If the file has one json object per line else: with open(UpperCAmelCase , "rb" ) as f: A_ = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small A_ = max(self.config.chunksize // 32 , 16 << 10 ) A_ = ( self.config.encoding_errors if self.config.encoding_errors is not None else "strict" ) while True: A_ = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(UpperCAmelCase ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": A_ = batch.decode(self.config.encoding , errors=UpperCAmelCase ).encode("utf-8" ) try: while True: try: A_ = paj.read_json( io.BytesIO(UpperCAmelCase ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(UpperCAmelCase , pa.ArrowInvalid ) and "straddling" not in str(UpperCAmelCase ) or block_size > len(UpperCAmelCase ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( f'''Batch of {len(UpperCAmelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: A_ = json.load(UpperCAmelCase ) except json.JSONDecodeError: logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(UpperCAmelCase , UpperCAmelCase ): # list is the only sequence type supported in JSON try: A_ = set().union(*[row.keys() for row in dataset] ) A_ = {col: [row.get(UpperCAmelCase ) for row in dataset] for col in keys} A_ = pa.Table.from_pydict(UpperCAmelCase ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' ) raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None yield file_idx, self._cast_table(UpperCAmelCase ) break else: logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' ) raise ValueError( f'''Not able to read records in the JSON file at {file}. ''' f'''You should probably indicate the field of the JSON file containing your records. ''' f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. ''' f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase ) batch_idx += 1
329
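A minimal usage sketch for the JSON builder above, reached through the public `datasets.load_dataset` entry point; the file names and the "data" field are placeholders.

from datasets import load_dataset

# Newline-delimited JSON is parsed in chunks with pyarrow.json.read_json.
ds = load_dataset("json", data_files="data.jsonl", split="train")

# A single JSON document whose records sit under one key goes through the
# `field` branch above; "data" is a made-up key for illustration.
ds_nested = load_dataset("json", data_files="data.json", field="data", split="train")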
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Any = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ): 
requires_backends(cls , ["torch", "transformers", "onnx"] )
329
1
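These placeholder classes let imports succeed when the optional backends are missing and fail only at use time; a small sketch of that failure mode (`_a` is the mangled class name used above, and the except branch only fires when torch/transformers/onnx are absent):

try:
    _a()  # any dummy class from the block above
except ImportError as err:
    print(err)  # the requires_backends message naming the backends to install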
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a :Optional[Any] = { 'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a :Tuple = [ 'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MegatronBertForCausalLM', 'MegatronBertForMaskedLM', 'MegatronBertForMultipleChoice', 'MegatronBertForNextSentencePrediction', 'MegatronBertForPreTraining', 'MegatronBertForQuestionAnswering', 'MegatronBertForSequenceClassification', 'MegatronBertForTokenClassification', 'MegatronBertModel', 'MegatronBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys __a :Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
329
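Since the module above wires everything through `_LazyModule`, the torch-backed classes are only imported on first attribute access; a quick sketch assuming `transformers` and `torch` are installed:

from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig()       # default hyperparameters
model = MegatronBertModel(config)   # resolved lazily from modeling_megatron_bert
print(model.config.hidden_size)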
import itertools import math def __snake_case ( __UpperCamelCase : int ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All prime numbers are of the form 6k +/- 1 for i in range(5 ,int(math.sqrt(__UpperCamelCase ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __snake_case ( ): """simple docstring""" A_ = 2 while True: if is_prime(__UpperCamelCase ): yield num num += 1 def __snake_case ( __UpperCamelCase : int = 1_0001 ): """simple docstring""" return next(itertools.islice(prime_generator() ,nth - 1 ,__UpperCamelCase ) ) if __name__ == "__main__": print(F"{solution() = }")
329
1
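For context, a sketch of how the three helpers above compose, assuming they carry their conventional names `is_prime`, `prime_generator` and `solution` (the in-body call sites already use those names, even though the mangled `def` lines do not):

# 6k +/- 1 trial division, then an itertools.islice lookup of the nth prime.
assert is_prime(13) and not is_prime(1)
print(solution(6))  # 13, the 6th prime
print(solution())   # the 10001st prime (Project Euler problem 7)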
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Any = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ): 
requires_backends(cls , ["torch", "transformers", "onnx"] )
329
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _a : """simple docstring""" def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ): A_ = parent A_ = 13 A_ = 7 A_ = True A_ = True A_ = True A_ = True A_ = 99 A_ = 384 A_ = 2 A_ = 4 A_ = 37 A_ = "gelu" A_ = 0.1 A_ = 0.1 A_ = 512 A_ = 16 A_ = 2 A_ = 0.02 A_ = 3 A_ = 4 A_ = 128 A_ = 2 A_ = 9 A_ = 1 A_ = None def __A ( self : Optional[int] ): A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_input_mask: A_ = random_attention_mask([self.batch_size, self.seq_length] ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = ids_tensor([self.batch_size] , self.num_choices ) A_ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ): A_ = TFConvBertModel(config=UpperCAmelCase ) A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ = [input_ids, input_mask] A_ = model(UpperCAmelCase ) A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : 
Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ): A_ = TFConvBertForMaskedLM(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ): A_ = self.num_labels A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ): A_ = self.num_choices A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ): A_ = self.num_labels A_ = TFConvBertForTokenClassification(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ): A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : List[str] ): A_ = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = config_and_inputs A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) _lowerCamelCase : Any = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': 
TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase : Dict = False _lowerCamelCase : Optional[int] = False _lowerCamelCase : Dict = False def __A ( self : List[str] ): A_ = TFConvBertModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def __A ( self : Tuple ): self.config_tester.run_common_tests() def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase ) def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase ) @slow def __A ( self : str ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True A_ = True if hasattr(UpperCAmelCase , "use_cache" ): A_ = True A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) for model_class in self.all_model_classes: A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) A_ = model_class(UpperCAmelCase ) A_ = len(model(UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase ) A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" ) A_ = tf.keras.models.load_model(UpperCAmelCase ) A_ = model(UpperCAmelCase ) if self.is_encoder_decoder: A_ = outputs["encoder_hidden_states"] A_ = outputs["encoder_attentions"] else: A_ = outputs["hidden_states"] A_ = outputs["attentions"] self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) A_ = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __A ( self : List[str] ): A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(UpperCAmelCase ) def __A ( self : Any ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ): A_ = len(UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) A_ = 
outputs.decoder_attentions self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ): A_ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: A_ = True A_ = False A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) A_ = len(UpperCAmelCase ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) if self.is_encoder_decoder: A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_decoder_attentions_output(UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) # Check attention is always last and order is fine A_ = True A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) @require_tf class _a ( unittest.TestCase ): """simple docstring""" @slow def __A ( self : Dict ): A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) A_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ = model(UpperCAmelCase )[0] A_ = [1, 6, 768] self.assertEqual(output.shape , UpperCAmelCase ) A_ = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
329
1
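The slow integration test above pins the output shape for `YituTech/conv-bert-base`; the same check as a standalone sketch:

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
outputs = model(tf.constant([[0, 1, 2, 3, 4, 5]]))
print(outputs.last_hidden_state.shape)  # (1, 6, 768), matching the test's expected dims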
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : int = '' _lowerCamelCase : Tuple = 'hf-legacy' # "hf://"" is reserved for hffs def __init__( self : int , UpperCAmelCase : Optional[DatasetInfo] = None , UpperCAmelCase : Optional[str] = None , **UpperCAmelCase : Union[str, Any] , ): super().__init__(self , **UpperCAmelCase ) A_ = repo_info A_ = token A_ = None def __A ( self : Optional[Any] ): if self.dir_cache is None: A_ = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes A_ = { "name": hf_file.rfilename, "size": None, "type": "file", } self.dir_cache.update( { str(UpperCAmelCase ): {"name": str(UpperCAmelCase ), "size": None, "type": "directory"} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def __A ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str = "rb" , **UpperCAmelCase : str , ): if not isinstance(self.repo_info , UpperCAmelCase ): raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) A_ = hf_hub_url(self.repo_info.id , UpperCAmelCase , revision=self.repo_info.sha ) return fsspec.open( UpperCAmelCase , mode=UpperCAmelCase , headers=get_authentication_headers_for_url(UpperCAmelCase , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open() def __A ( self : Tuple , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ): self._get_dirs() A_ = self._strip_protocol(UpperCAmelCase ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(UpperCAmelCase ) def __A ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : int=False , **UpperCAmelCase : Optional[Any] ): self._get_dirs() A_ = PurePosixPath(path.strip("/" ) ) A_ = {} for p, f in self.dir_cache.items(): A_ = PurePosixPath(p.strip("/" ) ) A_ = p.parent if root == path: A_ = f A_ = list(paths.values() ) if detail: return out else: return sorted(f["name"] for f in out )
329
from ...configuration_utils import PretrainedConfig from ...utils import logging __a :Dict = logging.get_logger(__name__) __a :int = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = 'realm' def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=30522 , UpperCAmelCase : List[str]=768 , UpperCAmelCase : Optional[Any]=128 , UpperCAmelCase : str=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Optional[Any]=8 , UpperCAmelCase : Any=3072 , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=1E-12 , UpperCAmelCase : List[Any]=256 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[str]=1E-3 , UpperCAmelCase : Any=5 , UpperCAmelCase : List[Any]=320 , UpperCAmelCase : Optional[Any]=13353718 , UpperCAmelCase : Tuple=5000 , UpperCAmelCase : List[str]=1 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Union[str, Any]=2 , **UpperCAmelCase : List[str] , ): super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase ) # Common config A_ = vocab_size A_ = max_position_embeddings A_ = hidden_size A_ = retriever_proj_size A_ = num_hidden_layers A_ = num_attention_heads A_ = num_candidates A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = initializer_range A_ = type_vocab_size A_ = layer_norm_eps # Reader config A_ = span_hidden_size A_ = max_span_width A_ = reader_layer_norm_eps A_ = reader_beam_size A_ = reader_seq_len # Retrieval config A_ = num_block_records A_ = searcher_beam_size
329
1
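A quick instantiation sketch; the leading defaults in the signature above map to vocab_size=30522, hidden_size=768 and retriever_proj_size=128, mirroring the pretrained REALM checkpoints:

from transformers import RealmConfig

config = RealmConfig()
print(config.vocab_size, config.hidden_size, config.retriever_proj_size)  # 30522 768 128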
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[Any] = JukeboxTokenizer _lowerCamelCase : Optional[Any] = { 'artist': 'Zac Brown Band', 'genres': 'Country', 'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ', } @require_torch def __A ( self : Tuple ): import torch A_ = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" ) A_ = tokenizer(**self.metas )["input_ids"] # fmt: off A_ = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 
76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def __A ( self : int ): import torch A_ = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" ) A_ = tokenizer(**self.metas )["input_ids"] # fmt: off A_ = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 
39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
329
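The test above drives the tokenizer with keyword metadata rather than a single string; a minimal sketch of that calling convention (lyrics shortened here):

from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
tokens = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller...")
print(len(tokens["input_ids"]))  # one tensor per prior, three in total as in the test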
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a :Optional[Any] = logging.get_logger(__name__) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ): """simple docstring""" A_ = original_name.split("." )[0] A_ = key.split("." ) A_ = int(key_list[key_list.index(__UpperCamelCase ) - 2] ) A_ = int(key_list[key_list.index(__UpperCamelCase ) - 1] ) A_ = orig_block_num - offset A_ = key.replace(f'''{orig_block_num}.{layer_num}.{original_name}''' ,f'''block.{new_block_num}.{layer_num}.{new_name}''' ) return key def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" A_ = OrderedDict() A_ , A_ = 0, 0 for key, value in state_dict.items(): if key.startswith("network" ): A_ = key.replace("network" ,"poolformer.encoder" ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith("bias" ) and "patch_embed" not in key: patch_emb_offset += 1 A_ = key[: key.find("proj" )] A_ = key.replace(__UpperCamelCase ,f'''patch_embeddings.{total_embed_found}.''' ) A_ = key.replace("proj" ,"projection" ) if key.endswith("bias" ): total_embed_found += 1 if "patch_embeddings" in key: A_ = "poolformer.encoder." + key if "mlp.fc1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc1" ,"output.conv1" ) if "mlp.fc2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"mlp.fc2" ,"output.conv2" ) if "norm1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm1" ,"before_norm" ) if "norm2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"norm2" ,"after_norm" ) if "layer_scale_1" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_1" ,"layer_scale_1" ) if "layer_scale_2" in key: A_ = replace_key_with_offset(__UpperCamelCase ,__UpperCamelCase ,"layer_scale_2" ,"layer_scale_2" ) if "head" in key: A_ = key.replace("head" ,"classifier" ) A_ = value return new_state_dict def __snake_case ( ): """simple docstring""" A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ) return image @torch.no_grad() def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ): """simple docstring""" A_ = PoolFormerConfig() # set attributes based on model_name A_ = "huggingface/label-files" A_ = model_name[-3:] A_ = 1000 A_ = "imagenet-1k-id2label.json" A_ = (1, 1000) # set config attributes A_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type="dataset" ) ,"r" ) ) A_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} if size == "s12": A_ = [2, 2, 6, 2] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 0.9 elif size == "s24": A_ = [4, 4, 12, 4] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 0.9 elif size == "s36": A_ = [6, 6, 18, 6] A_ = [64, 128, 320, 512] A_ = 4.0 A_ = 1E-6 A_ = 0.9 elif size == "m36": A_ = [6, 6, 18, 6] A_ = [96, 192, 384, 768] A_ = 4.0 A_ = 1E-6 A_ = 0.95 elif size == "m48": A_ = [8, 8, 24, 8] A_ = [96, 192, 384, 768] A_ = 4.0 A_ = 1E-6 A_ = 
0.95 else: raise ValueError(f'''Size {size} not supported''' ) # load image processor A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase ) # Prepare image A_ = prepare_img() A_ = image_processor(images=__UpperCamelCase ,return_tensors="pt" ).pixel_values logger.info(f'''Converting model {model_name}...''' ) # load original state dict A_ = torch.load(__UpperCamelCase ,map_location=torch.device("cpu" ) ) # rename keys A_ = rename_keys(__UpperCamelCase ) # create HuggingFace model and load state dict A_ = PoolFormerForImageClassification(__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) model.eval() # Define image processor A_ = PoolFormerImageProcessor(crop_pct=__UpperCamelCase ) A_ = image_processor(images=prepare_img() ,return_tensors="pt" ).pixel_values # forward pass A_ = model(__UpperCamelCase ) A_ = outputs.logits # define expected logit slices for different models if size == "s12": A_ = torch.tensor([-0.3045, -0.6758, -0.4869] ) elif size == "s24": A_ = torch.tensor([0.4402, -0.1374, -0.8045] ) elif size == "s36": A_ = torch.tensor([-0.6080, -0.5133, -0.5898] ) elif size == "m36": A_ = torch.tensor([0.3952, 0.2263, -1.2668] ) elif size == "m48": A_ = torch.tensor([0.1167, -0.0656, -0.3423] ) else: raise ValueError(f'''Size {size} not supported''' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1E-2 ) # finally, save model and image processor logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) __a :int = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
329
1
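A hypothetical direct invocation of the converter (the `__main__` block above calls it positionally under the name `convert_poolformer_checkpoint`, even though the mangled `def` differs); all paths are placeholders:

# model_name selects the size branch ("s12" ... "m48"); checkpoint_path is a local .pth file.
convert_poolformer_checkpoint(
    "poolformer_s12",                # --model_name
    "/path/to/poolformer_s12.pth",   # --checkpoint_path
    "./poolformer_s12",              # --pytorch_dump_folder_path
)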
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __a :Union[str, Any] = get_logger() __a :Optional[dict] = None class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : Any=None , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ): super().__init__(features=UpperCAmelCase ) import jax from jaxlib.xla_client import Device if isinstance(UpperCAmelCase , UpperCAmelCase ): raise ValueError( f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` ''' "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) A_ = str(jax.devices()[0] ) A_ = jnp_array_kwargs @staticmethod def __A ( ): import jax return {str(UpperCAmelCase ): device for device in jax.devices()} def __A ( self : Union[str, Any] , UpperCAmelCase : str ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , UpperCAmelCase ) and column: if all( isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(UpperCAmelCase , axis=0 ) return column def __A ( self : Any , UpperCAmelCase : Tuple ): import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ): return value elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() A_ = {} if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: A_ = {"dtype": jnp.intaa} else: A_ = {"dtype": jnp.intaa} elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): A_ = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCAmelCase , PIL.Image.Image ): A_ = np.asarray(UpperCAmelCase ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: A_ = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return 
jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} ) def __A ( self : str , UpperCAmelCase : Tuple ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(UpperCAmelCase , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ): A_ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCAmelCase , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) elif isinstance(UpperCAmelCase , (list, tuple) ): return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] ) return self._tensorize(UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : dict ): return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase ) def __A ( self : int , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase ) A_ = self.python_features_decoder.decode_row(UpperCAmelCase ) return self.recursive_tensorize(UpperCAmelCase ) def __A ( self : Any , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase ) A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] ) A_ = self.recursive_tensorize(UpperCAmelCase ) A_ = self._consolidate(UpperCAmelCase ) return column def __A ( self : Tuple , UpperCAmelCase : pa.Table ): A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase ) A_ = self.python_features_decoder.decode_batch(UpperCAmelCase ) A_ = self.recursive_tensorize(UpperCAmelCase ) for column_name in batch: A_ = self._consolidate(batch[column_name] ) return batch
329
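End users reach this formatter (exported upstream as `JaxFormatter`) through `Dataset.with_format("jax")`; a small sketch assuming `jax` is installed:

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
row = ds[0]            # decoded through the formatter's format_row path
print(type(row["x"]))  # a jax.Array placed on the default device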
import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : torch.FloatTensor _lowerCamelCase : Optional[torch.FloatTensor] = None def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : Any=0.999 ,__UpperCamelCase : Any="cosine" ,): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCamelCase : Any ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCamelCase : int ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) A_ = [] for i in range(__UpperCamelCase ): A_ = i / num_diffusion_timesteps A_ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCamelCase ) / alpha_bar_fn(__UpperCamelCase ) ,__UpperCamelCase ) ) return torch.tensor(__UpperCamelCase ,dtype=torch.floataa ) class _a ( snake_case_ , snake_case_ ): """simple docstring""" @register_to_config def __init__( self : Optional[int] , UpperCAmelCase : int = 1000 , UpperCAmelCase : str = "fixed_small_log" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[float] = 1.0 , UpperCAmelCase : str = "epsilon" , UpperCAmelCase : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" ) A_ = betas_for_alpha_bar(UpperCAmelCase ) A_ = 1.0 - self.betas A_ = torch.cumprod(self.alphas , dim=0 ) A_ = torch.tensor(1.0 ) # standard deviation of the initial noise distribution A_ = 1.0 # setable values A_ = None A_ = torch.from_numpy(np.arange(0 , UpperCAmelCase )[::-1].copy() ) A_ = variance_type def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ): return sample def __A ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ): A_ = num_inference_steps A_ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) A_ = (np.arange(0 , UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) A_ = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase ) def __A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str=None , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=None ): if prev_timestep is None: A_ = t - 1 A_ = self.alphas_cumprod[t] A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one A_ = 1 - alpha_prod_t A_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: A_ = self.betas[t] else: A_ = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample A_ = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: A_ = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": A_ = torch.log(torch.clamp(UpperCAmelCase , min=1E-20 ) ) A_ = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler A_ = 
variance.log() A_ = beta.log() A_ = (predicted_variance + 1) / 2 A_ = frac * max_log + (1 - frac) * min_log return variance def __A ( self : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : int , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Dict=None , UpperCAmelCase : bool = True , ): A_ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": A_ , A_ = torch.split(UpperCAmelCase , sample.shape[1] , dim=1 ) else: A_ = None # 1. compute alphas, betas if prev_timestep is None: A_ = t - 1 A_ = self.alphas_cumprod[t] A_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one A_ = 1 - alpha_prod_t A_ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: A_ = self.betas[t] A_ = self.alphas[t] else: A_ = 1 - alpha_prod_t / alpha_prod_t_prev A_ = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": A_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": A_ = model_output else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' " for the UnCLIPScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: A_ = torch.clamp( UpperCAmelCase , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t A_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise A_ = 0 if t > 0: A_ = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase , device=model_output.device ) A_ = self._get_variance( UpperCAmelCase , predicted_variance=UpperCAmelCase , prev_timestep=UpperCAmelCase , ) if self.variance_type == "fixed_small_log": A_ = variance elif self.variance_type == "learned_range": A_ = (0.5 * variance).exp() else: raise ValueError( f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' " for the UnCLIPScheduler." ) A_ = variance * variance_noise A_ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase , pred_original_sample=UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples A_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) A_ = timesteps.to(original_samples.device ) A_ = alphas_cumprod[timesteps] ** 0.5 A_ = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): A_ = sqrt_alpha_prod.unsqueeze(-1 ) A_ = (1 - alphas_cumprod[timesteps]) ** 0.5 A_ = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): A_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) A_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
329
1
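A minimal, self-contained sketch (plain PyTorch; the name cosine_betas, the step count, and the toy tensor shapes are illustrative choices, not values from the snippet) of the squaredcos_cap_v2 schedule computed by betas_for_alpha_bar above, followed by the forward-noising formula implemented in the scheduler's last method:

import math
import torch

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> torch.Tensor:
    # alpha_bar(t) follows the squared-cosine curve used above
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]
    return torch.tensor(betas, dtype=torch.float32)

betas = cosine_betas(10)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

# forward diffusion: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
x0 = torch.randn(2, 3)
noise = torch.randn_like(x0)
t = 5
x_t = alphas_cumprod[t] ** 0.5 * x0 + (1 - alphas_cumprod[t]) ** 0.5 * noise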
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class _a ( unittest.TestCase ):
    """simple docstring"""

    def __A ( self : str ):
        debug_launcher(test_script.main )

    def __A ( self : List[str] ):
        debug_launcher(test_ops.main )
329
from math import isqrt, loga


def __snake_case ( __UpperCamelCase : int ):
    """simple docstring"""
    A_ = [True] * max_number
    for i in range(2 ,isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 ,__UpperCamelCase ,__UpperCamelCase ):
                A_ = False
    return [i for i in range(2 ,__UpperCamelCase ) if is_prime[i]]


def __snake_case ( __UpperCamelCase : int = 80_0800 ,__UpperCamelCase : int = 80_0800 ):
    """simple docstring"""
    A_ = degree * loga(__UpperCamelCase )
    A_ = int(__UpperCamelCase )
    A_ = calculate_prime_numbers(__UpperCamelCase )

    A_ = 0
    A_ = 0
    A_ = len(__UpperCamelCase ) - 1
    while left < right:
        while (
            prime_numbers[right] * loga(prime_numbers[left] )
            + prime_numbers[left] * loga(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(F"{solution() = }")
329
1
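A small sketch of the log-space comparison that drives the two-pointer loop above: p**q * q**p stays at or below base**degree exactly when q*log2(p) + p*log2(q) <= degree*log2(base), which avoids building huge integers. The primes and bounds below are toy values for illustration:

from math import log2

def is_hybrid_below(p: int, q: int, base: int, degree: int) -> bool:
    # compare exponents in log2 space instead of computing p**q * q**p directly
    return q * log2(p) + p * log2(q) <= degree * log2(base)

assert is_hybrid_below(2, 3, 800, 1) == (2**3 * 3**2 <= 800)  # 72 <= 800 -> True
assert is_hybrid_below(2, 3, 50, 1) == (2**3 * 3**2 <= 50)    # 72 > 50  -> False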
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def __snake_case ( __UpperCamelCase : dict ):
    """simple docstring"""
    return (data["data"], data["target"])


def __snake_case ( __UpperCamelCase : np.ndarray ,__UpperCamelCase : np.ndarray ,__UpperCamelCase : np.ndarray ):
    """simple docstring"""
    A_ = XGBRegressor(verbosity=0 ,random_state=42 )
    xgb.fit(__UpperCamelCase ,__UpperCamelCase )
    # Predict target for test data
    A_ = xgb.predict(__UpperCamelCase )
    A_ = predictions.reshape(len(__UpperCamelCase ) ,1 )
    return predictions


def __snake_case ( ):
    """simple docstring"""
    A_ = fetch_california_housing()
    A_ , A_ = data_handling(__UpperCamelCase )
    A_ , A_ , A_ , A_ = train_test_split(
        __UpperCamelCase ,__UpperCamelCase ,test_size=0.25 ,random_state=1 )
    A_ = xgboost(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(__UpperCamelCase ,__UpperCamelCase )}''' )
    print(f'''Mean Square Error : {mean_squared_error(__UpperCamelCase ,__UpperCamelCase )}''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
329
import argparse

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging


logging.set_verbosity_info()
__a :str = logging.get_logger(__name__)


def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ):
    """simple docstring"""
    A_ = RobertaPreLayerNormConfig.from_pretrained(
        __UpperCamelCase ,architectures=["RobertaPreLayerNormForMaskedLM"] )

    # convert state_dict
    A_ = torch.load(hf_hub_download(repo_id=__UpperCamelCase ,filename="pytorch_model.bin" ) )
    A_ = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta." ):
            A_ = "roberta_prelayernorm." + tensor_key[len("roberta." ) :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight" ) or tensor_key.endswith(".self.LayerNorm.bias" ):
            continue

        A_ = tensor_value

    A_ = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=__UpperCamelCase ,config=__UpperCamelCase ,state_dict=__UpperCamelCase )
    model.save_pretrained(__UpperCamelCase )

    # convert tokenizer
    A_ = AutoTokenizer.from_pretrained(__UpperCamelCase )
    tokenizer.save_pretrained(__UpperCamelCase )


if __name__ == "__main__":
    __a :Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint-repo',
        default=None,
        type=str,
        required=True,
        help='Path to the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    __a :Any = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
329
1
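A toy illustration of the state-dict key rewrite the conversion script above performs: the "roberta." prefix is swapped for "roberta_prelayernorm." and unused LayerNorm entries are dropped. The keys and values here are hypothetical placeholders, not real checkpoint contents:

original = {
    "roberta.encoder.layer.0.attention.self.query.weight": 1,
    "roberta.encoder.layer.0.attention.self.LayerNorm.weight": 2,  # unused, dropped
    "lm_head.bias": 3,
}
converted = {}
for key, value in original.items():
    # rename the prefix first, then skip the unused LayerNorm weights
    if key.startswith("roberta."):
        key = "roberta_prelayernorm." + key[len("roberta."):]
    if key.endswith(".self.LayerNorm.weight") or key.endswith(".self.LayerNorm.bias"):
        continue
    converted[key] = value

assert "roberta_prelayernorm.encoder.layer.0.attention.self.query.weight" in converted
assert len(converted) == 2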
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    WavaVecaFeatureExtractor,
    logging,
)


logging.set_verbosity_info()
__a :Optional[int] = logging.get_logger(__name__)


def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any ,__UpperCamelCase : Dict ):
    """simple docstring"""
    A_ = UniSpeechSatForSequenceClassification.from_pretrained(__UpperCamelCase ,config=__UpperCamelCase )
    A_ = downstream_dict["projector.weight"]
    A_ = downstream_dict["projector.bias"]
    A_ = downstream_dict["model.post_net.linear.weight"]
    A_ = downstream_dict["model.post_net.linear.bias"]
    return model


def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[Any] ):
    """simple docstring"""
    A_ = UniSpeechSatForAudioFrameClassification.from_pretrained(__UpperCamelCase ,config=__UpperCamelCase )
    A_ = downstream_dict["model.linear.weight"]
    A_ = downstream_dict["model.linear.bias"]
    return model


def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : List[Any] ):
    """simple docstring"""
    A_ = UniSpeechSatForXVector.from_pretrained(__UpperCamelCase ,config=__UpperCamelCase )
    A_ = downstream_dict["connector.weight"]
    A_ = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        A_ = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        A_ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']

    A_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    A_ = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    A_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    A_ = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    A_ = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Tuple ,__UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
    """simple docstring"""
    A_ = torch.load(__UpperCamelCase ,map_location="cpu" )
    A_ = checkpoint["Downstream"]

    A_ = UniSpeechSatConfig.from_pretrained(__UpperCamelCase )
    A_ = WavaVecaFeatureExtractor.from_pretrained(
        __UpperCamelCase ,return_attention_mask=__UpperCamelCase ,do_normalize=__UpperCamelCase )

    A_ = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        A_ = convert_classification(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
    elif arch.endswith("ForAudioFrameClassification" ):
        A_ = convert_diarization(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
    elif arch.endswith("ForXVector" ):
        A_ = convert_xvector(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )

    if hf_config.use_weighted_layer_sum:
        A_ = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(__UpperCamelCase )
    hf_model.save_pretrained(__UpperCamelCase )


if __name__ == "__main__":
    __a :Tuple = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    __a :Union[str, Any] = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
329
from maths.prime_factors import prime_factors


def __snake_case ( __UpperCamelCase : int ):
    """simple docstring"""
    if not isinstance(__UpperCamelCase ,__UpperCamelCase ):
        A_ = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(__UpperCamelCase )
    if number < 1:
        raise ValueError("Input must be a positive integer" )
    return -1 if len(prime_factors(__UpperCamelCase ) ) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
329
1
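A self-contained sketch of the Liouville function computed via prime_factors above: lambda(n) = (-1)**Omega(n), where Omega counts prime factors with multiplicity. Here Omega is computed by plain trial division instead of the project's prime_factors helper, which is an assumption about what that helper returns:

def liouville(n: int) -> int:
    if n < 1:
        raise ValueError("Input must be a positive integer")
    omega = 0  # number of prime factors, counted with multiplicity
    d = 2
    while d * d <= n:
        while n % d == 0:
            n //= d
            omega += 1
        d += 1
    if n > 1:  # leftover prime factor
        omega += 1
    return -1 if omega % 2 else 1

assert liouville(1) == 1    # Omega(1) = 0
assert liouville(12) == -1  # 12 = 2 * 2 * 3, Omega = 3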
from functools import lru_cache


@lru_cache
def __snake_case ( __UpperCamelCase : int ):
    """simple docstring"""
    if num < 0:
        raise ValueError("Number should not be negative." )
    return 1 if num in (0, 1) else num * factorial(num - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
329
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"{len(upper_files)} files contain uppercase characters:")
    print('\n'.join(upper_files) + '\n')

__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
    print(F"{len(space_files)} files contain space characters:")
    print('\n'.join(space_files) + '\n')

__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(F"{len(hyphen_files)} files contain hyphen characters:")
    print('\n'.join(hyphen_files) + '\n')

__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"{len(nodir_files)} files are not in a directory:")
    print('\n'.join(nodir_files) + '\n')

__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
329
1
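A quick sanity check for the memoized recursive factorial in the row above; the function name fact is a stand-in for the obfuscated definition, and math.factorial serves as the reference:

import math
from functools import lru_cache

@lru_cache
def fact(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * fact(num - 1)

# lru_cache memoizes each value, so repeated calls reuse earlier results
assert all(fact(n) == math.factorial(n) for n in range(10))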
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : List[Any] = IFPipeline _lowerCamelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'} _lowerCamelCase : Tuple = TEXT_TO_IMAGE_BATCH_PARAMS _lowerCamelCase : Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'} def __A ( self : Optional[Any] ): return self._get_dummy_components() def __A ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=0 ): if str(UpperCAmelCase ).startswith("mps" ): A_ = torch.manual_seed(UpperCAmelCase ) else: A_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase ) A_ = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __A ( self : List[Any] ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __A ( self : List[Any] ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __A ( self : Union[str, Any] ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __A ( self : List[Any] ): self._test_save_load_local() def __A ( self : Union[str, Any] ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __A ( self : Optional[Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Optional[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : Tuple ): # if A_ = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) A_ = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) A_ , A_ = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() A_ = None A_ = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img A_ = IFImgaImgPipeline(**pipe_a.components ) A_ = 
IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting A_ = IFInpaintingPipeline(**pipe_a.components ) A_ = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def __A ( self : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Dict ): # pipeline 1 _start_torch_memory_measurement() A_ = torch.Generator(device="cpu" ).manual_seed(0 ) A_ = pipe_a( prompt_embeds=UpperCAmelCase , negative_prompt_embeds=UpperCAmelCase , num_inference_steps=2 , generator=UpperCAmelCase , output_type="np" , ) A_ = output.images[0] assert image.shape == (64, 64, 3) A_ = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() A_ = torch.Generator(device="cpu" ).manual_seed(0 ) A_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCAmelCase ) A_ = pipe_a( prompt_embeds=UpperCAmelCase , negative_prompt_embeds=UpperCAmelCase , image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) A_ = output.images[0] assert image.shape == (256, 256, 3) A_ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase ) def __A ( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : int ): # pipeline 1 _start_torch_memory_measurement() A_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCAmelCase ) A_ = torch.Generator(device="cpu" ).manual_seed(0 ) A_ = pipe_a( prompt_embeds=UpperCAmelCase , negative_prompt_embeds=UpperCAmelCase , image=UpperCAmelCase , num_inference_steps=2 , generator=UpperCAmelCase , output_type="np" , ) A_ = output.images[0] assert image.shape == (64, 64, 3) A_ = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() A_ = torch.Generator(device="cpu" ).manual_seed(0 ) A_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCAmelCase ) A_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCAmelCase ) A_ = pipe_a( prompt_embeds=UpperCAmelCase , negative_prompt_embeds=UpperCAmelCase , image=UpperCAmelCase , original_image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) A_ = output.images[0] assert image.shape == (256, 256, 3) A_ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 
* 10**9 A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int ): # pipeline 1 _start_torch_memory_measurement() A_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCAmelCase ) A_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCAmelCase ) A_ = torch.Generator(device="cpu" ).manual_seed(0 ) A_ = pipe_a( prompt_embeds=UpperCAmelCase , negative_prompt_embeds=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , num_inference_steps=2 , generator=UpperCAmelCase , output_type="np" , ) A_ = output.images[0] assert image.shape == (64, 64, 3) A_ = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() A_ = torch.Generator(device="cpu" ).manual_seed(0 ) A_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCAmelCase ) A_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCAmelCase ) A_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCAmelCase ) A_ = pipe_a( prompt_embeds=UpperCAmelCase , negative_prompt_embeds=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , original_image=UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type="np" , ) A_ = output.images[0] assert image.shape == (256, 256, 3) A_ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 A_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase ) def __snake_case ( ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
329
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


__a :Union[str, Any] = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __a :Optional[int] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    __a :str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
329
1
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[Any] = (DPMSolverSDEScheduler,) _lowerCamelCase : Tuple = 1_0 def __A ( self : str , **UpperCAmelCase : Optional[int] ): A_ = { "num_train_timesteps": 1100, "beta_start": 0.0_001, "beta_end": 0.02, "beta_schedule": "linear", "noise_sampler_seed": 0, } config.update(**UpperCAmelCase ) return config def __A ( self : Dict ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase ) def __A ( self : str ): for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ): self.check_over_configs(beta_start=UpperCAmelCase , beta_end=UpperCAmelCase ) def __A ( self : List[str] ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=UpperCAmelCase ) def __A ( self : Tuple ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) A_ = self.dummy_model() A_ = self.dummy_sample_deter * scheduler.init_noise_sigma A_ = sample.to(UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): A_ = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) A_ = model(UpperCAmelCase , UpperCAmelCase ) A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = output.prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47_821_044_921_875 ) < 1E-2 assert abs(result_mean.item() - 0.2_178_705_964_565_277 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_352_111_816_406 ) < 1E-2 assert abs(result_mean.item() - 0.22_342_906_892_299_652 ) < 1E-3 else: assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2 assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3 def __A ( self : Any ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config(prediction_type="v_prediction" ) A_ = scheduler_class(**UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) A_ = self.dummy_model() A_ = self.dummy_sample_deter * scheduler.init_noise_sigma A_ = sample.to(UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): A_ = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) A_ = model(UpperCAmelCase , UpperCAmelCase ) A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = output.prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77_149_200_439_453 ) < 1E-2 assert abs(result_mean.item() - 0.16_226_289_014_816_284 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1_663_360_595_703 ) < 1E-2 assert abs(result_mean.item() - 0.16_688_326_001_167_297 ) < 1E-3 else: assert abs(result_sum.item() - 119.8_487_548_828_125 ) < 1E-2 assert abs(result_mean.item() - 0.1_560_530_662_536_621 ) < 1E-3 def __A ( self : Dict ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase ) 
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter.to(UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: A_ = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) A_ = model(UpperCAmelCase , UpperCAmelCase ) A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = output.prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46_957_397_460_938 ) < 1E-2 assert abs(result_mean.item() - 0.21_805_934_607_982_635 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59_353_637_695_312 ) < 1E-2 assert abs(result_mean.item() - 0.22_342_908_382_415_771 ) < 1E-3 else: assert abs(result_sum.item() - 162.52_383_422_851_562 ) < 1E-2 assert abs(result_mean.item() - 0.211_619_570_851_326 ) < 1E-3 def __A ( self : List[Any] ): A_ = self.scheduler_classes[0] A_ = self.get_scheduler_config() A_ = scheduler_class(**UpperCAmelCase , use_karras_sigmas=UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase ) A_ = self.dummy_model() A_ = self.dummy_sample_deter.to(UpperCAmelCase ) * scheduler.init_noise_sigma A_ = sample.to(UpperCAmelCase ) for t in scheduler.timesteps: A_ = scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) A_ = model(UpperCAmelCase , UpperCAmelCase ) A_ = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = output.prev_sample A_ = torch.sum(torch.abs(UpperCAmelCase ) ) A_ = torch.mean(torch.abs(UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2 else: assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2 assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
329
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def __snake_case ( __UpperCamelCase : Union[str, Any] ):
    """simple docstring"""
    if is_torch_version("<" ,"2.0.0" ) or not hasattr(__UpperCamelCase ,"_dynamo" ):
        return False
    return isinstance(__UpperCamelCase ,torch._dynamo.eval_frame.OptimizedModule )


def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : bool = True ):
    """simple docstring"""
    A_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    A_ = is_compiled_module(__UpperCamelCase )
    if is_compiled:
        A_ = model
        A_ = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(__UpperCamelCase ,__UpperCamelCase ):
        A_ = model.module

    if not keep_fpaa_wrapper:
        A_ = getattr(__UpperCamelCase ,"forward" )
        A_ = model.__dict__.pop("_original_forward" ,__UpperCamelCase )
        if original_forward is not None:
            while hasattr(__UpperCamelCase ,"__wrapped__" ):
                A_ = forward.__wrapped__
                if forward == original_forward:
                    break
            A_ = forward

    if getattr(__UpperCamelCase ,"_converted_to_transformer_engine" ,__UpperCamelCase ):
        convert_model(__UpperCamelCase ,to_transformer_engine=__UpperCamelCase )

    if is_compiled:
        A_ = model
        A_ = compiled_model

    return model


def __snake_case ( ):
    """simple docstring"""
    PartialState().wait_for_everyone()


def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
    """simple docstring"""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(__UpperCamelCase ,__UpperCamelCase )
    elif PartialState().local_process_index == 0:
        torch.save(__UpperCamelCase ,__UpperCamelCase )


@contextmanager
def __snake_case ( **__UpperCamelCase : Any ):
    """simple docstring"""
    for key, value in kwargs.items():
        A_ = str(__UpperCamelCase )

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def __snake_case ( __UpperCamelCase : Optional[Any] ):
    """simple docstring"""
    if not hasattr(__UpperCamelCase ,"__qualname__" ) and not hasattr(__UpperCamelCase ,"__name__" ):
        A_ = getattr(__UpperCamelCase ,"__class__" ,__UpperCamelCase )

    if hasattr(__UpperCamelCase ,"__qualname__" ):
        return obj.__qualname__
    if hasattr(__UpperCamelCase ,"__name__" ):
        return obj.__name__

    return str(__UpperCamelCase )


def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ):
    """simple docstring"""
    for key, value in source.items():
        if isinstance(__UpperCamelCase ,__UpperCamelCase ):
            A_ = destination.setdefault(__UpperCamelCase ,{} )
            merge_dicts(__UpperCamelCase ,__UpperCamelCase )
        else:
            A_ = value

    return destination


def __snake_case ( __UpperCamelCase : int = None ):
    """simple docstring"""
    if port is None:
        A_ = 2_9500
    with socket.socket(socket.AF_INET ,socket.SOCK_STREAM ) as s:
        return s.connect_ex(("localhost", port) ) == 0
329
1
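A small usage sketch of the recursive merge_dicts helper that appears in the utilities above: nested mappings are merged into the destination in place, while scalar values overwrite. The config-like contents are toy data:

def merge_dicts(source: dict, destination: dict) -> dict:
    for key, value in source.items():
        if isinstance(value, dict):
            # recurse into nested mappings instead of replacing them wholesale
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination

dst = {"optim": {"lr": 1e-3, "betas": (0.9, 0.999)}, "seed": 0}
src = {"optim": {"lr": 5e-4}, "fp16": True}
merge_dicts(src, dst)
assert dst == {"optim": {"lr": 5e-4, "betas": (0.9, 0.999)}, "seed": 0, "fp16": True}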
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) __a :str = pytest.mark.integration @pytest.mark.parametrize("path" ,["paws", "csv"] ) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : str ): """simple docstring""" inspect_dataset(__UpperCamelCase ,__UpperCamelCase ) A_ = path + ".py" assert script_name in os.listdir(__UpperCamelCase ) assert "__pycache__" not in os.listdir(__UpperCamelCase ) @pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.parametrize("path" ,["accuracy"] ) def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Tuple ): """simple docstring""" inspect_metric(__UpperCamelCase ,__UpperCamelCase ) A_ = path + ".py" assert script_name in os.listdir(__UpperCamelCase ) assert "__pycache__" not in os.listdir(__UpperCamelCase ) @pytest.mark.parametrize( "path, config_name, expected_splits" ,[ ("squad", "plain_text", ["train", "validation"]), ("dalle-mini/wit", "dalle-mini--wit", ["train"]), ("paws", "labeled_final", ["train", "test", "validation"]), ] ,) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict ): """simple docstring""" A_ = get_dataset_config_info(__UpperCamelCase ,config_name=__UpperCamelCase ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( "path, config_name, expected_exception" ,[ ("paws", None, ValueError), ] ,) def __snake_case ( __UpperCamelCase : List[Any] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Union[str, Any] ): """simple docstring""" with pytest.raises(__UpperCamelCase ): get_dataset_config_info(__UpperCamelCase ,config_name=__UpperCamelCase ) @pytest.mark.parametrize( "path, expected" ,[ ("squad", "plain_text"), ("acronym_identification", "default"), ("lhoestq/squad", "plain_text"), ("lhoestq/test", "default"), ("lhoestq/demo1", "lhoestq--demo1"), ("dalle-mini/wit", "dalle-mini--wit"), ] ,) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple ): """simple docstring""" A_ = get_dataset_config_names(__UpperCamelCase ) assert expected in config_names @pytest.mark.parametrize( "path, expected_configs, expected_splits_in_first_config" ,[ ("squad", ["plain_text"], ["train", "validation"]), ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]), ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]), ] ,) def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : Any ,__UpperCamelCase : Any ): """simple docstring""" A_ = get_dataset_infos(__UpperCamelCase ) assert list(infos.keys() ) == expected_configs A_ = expected_configs[0] assert expected_config in infos A_ = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( "path, expected_config, expected_splits" ,[ ("squad", "plain_text", ["train", "validation"]), ("dalle-mini/wit", "dalle-mini--wit", ["train"]), ("paws", "labeled_final", ["train", "test", "validation"]), ] ,) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : List[str] ,__UpperCamelCase : List[Any] ): """simple docstring""" A_ = get_dataset_infos(__UpperCamelCase ) assert expected_config in infos A_ = infos[expected_config] assert info.config_name == 
expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( "path, config_name, expected_exception" ,[ ("paws", None, ValueError), ] ,) def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Dict ,__UpperCamelCase : Optional[int] ): """simple docstring""" with pytest.raises(__UpperCamelCase ): get_dataset_split_names(__UpperCamelCase ,config_name=__UpperCamelCase )
329
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : int ): A_ = tempfile.mkdtemp() A_ = BlipImageProcessor() A_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" ) A_ = BlipProcessor(UpperCAmelCase , UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def __A ( self : Optional[int] , **UpperCAmelCase : Union[str, Any] ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).tokenizer def __A ( self : Optional[Any] , **UpperCAmelCase : int ): return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ).image_processor def __A ( self : Any ): shutil.rmtree(self.tmpdirname ) def __A ( self : Dict ): A_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A_ = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __A ( self : Any ): A_ = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A_ = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) A_ = BlipProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def __A ( self : Dict ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = self.prepare_image_inputs() A_ = image_processor(UpperCAmelCase , return_tensors="np" ) A_ = processor(images=UpperCAmelCase , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self : int ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = processor(text=UpperCAmelCase ) A_ = tokenizer(UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self : Tuple ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def __A ( self : Any ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ = 
processor.batch_decode(UpperCAmelCase ) A_ = tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Optional[Any] ): A_ = self.get_image_processor() A_ = self.get_tokenizer() A_ = BlipProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) A_ = "lower newer" A_ = self.prepare_image_inputs() A_ = processor(text=UpperCAmelCase , images=UpperCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
329
1
import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger __a :str = get_logger(__name__) __a :Optional[Any] = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n' class _a : """simple docstring""" @add_start_docstrings(UpperCAmelCase ) def __call__( self : Any , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class _a : """simple docstring""" @add_start_docstrings(UpperCAmelCase ) def __call__( self : Any , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray ): raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class _a ( snake_case_ ): """simple docstring""" @add_start_docstrings(UpperCAmelCase ) def __call__( self : List[str] , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ): for processor in self: A_ = inspect.signature(processor.__call__ ).parameters if len(UpperCAmelCase ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' f'''{processor.__class__} are passed to the logits processor.''' ) A_ = processor(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) else: A_ = processor(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return scores class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[Any] , UpperCAmelCase : float ): if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' ) A_ = temperature def __call__( self : List[Any] , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : int ): A_ = scores / self.temperature return scores class _a ( snake_case_ ): """simple docstring""" def __init__( self : str , UpperCAmelCase : float , UpperCAmelCase : float = -float("Inf" ) , UpperCAmelCase : int = 1 ): if not isinstance(UpperCAmelCase , UpperCAmelCase ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(UpperCAmelCase , UpperCAmelCase ) or (min_tokens_to_keep < 1): raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) A_ = top_p A_ = filter_value A_ = min_tokens_to_keep def __call__( self : Union[str, Any] , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : int ): A_ , A_ = lax.top_k(UpperCAmelCase , scores.shape[-1] ) A_ = 
jnp.full_like(UpperCAmelCase , self.filter_value ) A_ = jax.nn.softmax(UpperCAmelCase , axis=-1 ).cumsum(axis=-1 ) A_ = cumulative_probs < self.top_p # include the token that is higher than top_p as well A_ = jnp.roll(UpperCAmelCase , 1 ) score_mask |= score_mask.at[:, 0].set(UpperCAmelCase ) # min tokens to keep A_ = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCAmelCase ) A_ = jnp.where(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = jax.lax.sort_key_val(UpperCAmelCase , UpperCAmelCase )[-1] return next_scores class _a ( snake_case_ ): """simple docstring""" def __init__( self : Any , UpperCAmelCase : int , UpperCAmelCase : float = -float("Inf" ) , UpperCAmelCase : int = 1 ): if not isinstance(UpperCAmelCase , UpperCAmelCase ) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) A_ = max(UpperCAmelCase , UpperCAmelCase ) A_ = filter_value def __call__( self : List[str] , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : int ): A_ , A_ = scores.shape A_ = jnp.full(batch_size * vocab_size , self.filter_value ) A_ = min(self.top_k , scores.shape[-1] ) # Safety check A_ , A_ = lax.top_k(UpperCAmelCase , UpperCAmelCase ) A_ = jnp.broadcast_to((jnp.arange(UpperCAmelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() A_ = topk_scores.flatten() A_ = topk_indices.flatten() + shift A_ = next_scores_flat.at[topk_indices_flat].set(UpperCAmelCase ) A_ = next_scores_flat.reshape(UpperCAmelCase , UpperCAmelCase ) return next_scores class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : int ): A_ = bos_token_id def __call__( self : Tuple , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : int ): A_ = jnp.full(scores.shape , -float("inf" ) ) A_ = 1 - jnp.bool_(cur_len - 1 ) A_ = jnp.where(UpperCAmelCase , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCAmelCase ) return scores class _a ( snake_case_ ): """simple docstring""" def __init__( self : str , UpperCAmelCase : int , UpperCAmelCase : int ): A_ = max_length A_ = eos_token_id def __call__( self : Optional[int] , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : int ): A_ = jnp.full(scores.shape , -float("inf" ) ) A_ = 1 - jnp.bool_(cur_len - self.max_length + 1 ) A_ = jnp.where(UpperCAmelCase , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCAmelCase ) return scores class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int ): if not isinstance(UpperCAmelCase , UpperCAmelCase ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(UpperCAmelCase , UpperCAmelCase ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) A_ = min_length A_ = eos_token_id def __call__( self : Union[str, Any] , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : int ): # create boolean flag to decide if min length penalty should be applied A_ = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) A_ = jnp.where(UpperCAmelCase , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , UpperCAmelCase ) return scores class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Dict ): A_ = list(UpperCAmelCase ) A_ = begin_index def __call__( self : Union[str, Any] , UpperCAmelCase : 
Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : int ): A_ = 1 - jnp.bool_(cur_len - self.begin_index ) A_ = jnp.where(UpperCAmelCase , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , UpperCAmelCase ) return scores class _a ( snake_case_ ): """simple docstring""" def __init__( self : Optional[int] , UpperCAmelCase : list ): A_ = list(UpperCAmelCase ) def __call__( self : Dict , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : int ): A_ = scores.at[..., self.suppress_tokens].set(-float("inf" ) ) return scores class _a ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : List[Any] ): A_ = dict(UpperCAmelCase ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. A_ = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: A_ = force_token_array.at[index].set(UpperCAmelCase ) A_ = jnp.intaa(UpperCAmelCase ) def __call__( self : Optional[int] , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : jnp.ndarray , UpperCAmelCase : int ): def _force_token(UpperCAmelCase : str ): A_ = scores.shape[0] A_ = self.force_token_array[generation_idx] A_ = jnp.ones_like(UpperCAmelCase , dtype=scores.dtype ) * -float("inf" ) A_ = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) A_ = lax.dynamic_update_slice(UpperCAmelCase , UpperCAmelCase , (0, current_token) ) return new_scores A_ = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCAmelCase ) , lambda: scores , ) , ) return scores class _a ( snake_case_ ): """simple docstring""" def __init__( self : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] ): A_ = generate_config.eos_token_id A_ = generate_config.no_timestamps_token_id A_ = generate_config.no_timestamps_token_id + 1 A_ = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(UpperCAmelCase , "max_initial_timestamp_index" ): A_ = generate_config.max_initial_timestamp_index else: A_ = model_config.vocab_size if self.max_initial_timestamp_index is None: A_ = model_config.vocab_size def __call__( self : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : str ): # suppress <|notimestamps|> which is handled by without_timestamps A_ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) ) def handle_pairs(UpperCAmelCase : List[str] , UpperCAmelCase : Dict ): A_ = jnp.where((cur_len - self.begin_index) >= 1 , UpperCAmelCase , UpperCAmelCase ) A_ = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCAmelCase , ) A_ = jnp.where((cur_len - self.begin_index) < 2 , UpperCAmelCase , UpperCAmelCase ) A_ = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCAmelCase , UpperCAmelCase , ) return jnp.where( UpperCAmelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , UpperCAmelCase , ) A_ = jax.vmap(UpperCAmelCase )(UpperCAmelCase , UpperCAmelCase ) A_ = jnp.where(cur_len == self.begin_index , 
UpperCAmelCase , UpperCAmelCase ) A_ = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCAmelCase , ) A_ = self.timestamp_begin + self.max_initial_timestamp_index A_ = jnp.where( UpperCAmelCase , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , UpperCAmelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp A_ = jax.nn.log_softmax(UpperCAmelCase , axis=-1 ) def handle_cumulative_probs(UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ): A_ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) A_ = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , UpperCAmelCase , ) A_ = jax.vmap(UpperCAmelCase )(UpperCAmelCase , UpperCAmelCase ) return scores
329
import math


__a :Union[str, Any] = 10
__a :Union[str, Any] = 7
__a :int = BALLS_PER_COLOUR * NUM_COLOURS


def __snake_case ( __UpperCamelCase : int = 20 ):
    """simple docstring"""
    A_ = math.comb(__UpperCamelCase ,__UpperCamelCase )
    A_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__UpperCamelCase )
    A_ = NUM_COLOURS * (1 - missing_colour / total)
    return f'''{result:.9f}'''


if __name__ == "__main__":
    print(solution(20))
329
1
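A NumPy sketch of the top-p (nucleus) filtering idea behind the TopP logits processor in the snippet above: sort scores, keep the smallest set of tokens whose cumulative probability reaches top_p (always including the single best token, mirroring the "include the token that is higher than top_p as well" comment), and mask everything else. This is an illustrative re-implementation, not the library's JAX code; shapes and values are toy:

import numpy as np

def top_p_filter(scores: np.ndarray, top_p: float, filter_value: float = -np.inf) -> np.ndarray:
    order = np.argsort(scores)[::-1]                  # best token first
    probs = np.exp(scores[order] - scores[order].max())
    probs /= probs.sum()
    # keep tokens whose cumulative mass *before* them is still below top_p,
    # so the token that crosses the threshold is included as well
    keep = np.cumsum(probs) - probs < top_p
    keep[0] = True                                    # always keep the top token
    out = np.full_like(scores, filter_value, dtype=float)
    out[order[keep]] = scores[order[keep]]
    return out

logits = np.array([2.0, 1.0, 0.1, -1.0])
print(top_p_filter(logits, top_p=0.9))  # masks only the least likely token here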
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __a :List[Any] = logging.get_logger(__name__) __a :Optional[Any] = '▁' __a :int = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __a :Any = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __a :Union[str, Any] = {'vinai/bartpho-syllable': 1024} class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : int = VOCAB_FILES_NAMES _lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : str = ['input_ids', 'attention_mask'] def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int]="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : List[Any]="</s>" , UpperCAmelCase : List[str]="<s>" , UpperCAmelCase : int="<unk>" , UpperCAmelCase : Optional[Any]="<pad>" , UpperCAmelCase : Tuple="<mask>" , UpperCAmelCase : Optional[Dict[str, Any]] = None , **UpperCAmelCase : Tuple , ): # Mask token behave like a normal word, i.e. include the space before it A_ = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token A_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , ) A_ = vocab_file A_ = monolingual_vocab_file A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility A_ = {} A_ = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(UpperCAmelCase ) not in self.fairseq_tokens_to_ids: A_ = cnt cnt += 1 with open(UpperCAmelCase , "r" , encoding="utf-8" ) as f: for line in f.readlines(): A_ = line.strip().split()[0] A_ = len(self.fairseq_tokens_to_ids ) if str(UpperCAmelCase ) not in self.fairseq_tokens_to_ids: A_ = len(self.fairseq_tokens_to_ids ) A_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Dict ): A_ = self.__dict__.copy() A_ = None A_ = self.sp_model.serialized_model_proto() return state def __setstate__( self : List[str] , UpperCAmelCase : str ): A_ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A_ = {} A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __A ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ = [self.cls_token_id] A_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __A ( self : List[str] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ): if already_has_special_tokens: return 
super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase )) + [1] return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1] def __A ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ): A_ = [self.sep_token_id] A_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __A ( self : List[Any] ): return len(self.fairseq_ids_to_tokens ) def __A ( self : Tuple ): A_ = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __A ( self : Dict , UpperCAmelCase : str ): return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : Dict ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def __A ( self : str , UpperCAmelCase : Any ): return self.fairseq_ids_to_tokens[index] def __A ( self : Any , UpperCAmelCase : Any ): A_ = "".join(UpperCAmelCase ).replace(UpperCAmelCase , " " ).strip() return out_string def __A ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): if not os.path.isdir(UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return A_ = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) A_ = os.path.join( UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase , "wb" ) as fi: A_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(UpperCAmelCase , "w" , encoding="utf-8" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
329
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


__a :Optional[Any] = logging.get_logger(__name__)

__a :Any = {'vocab_file': 'vocab.txt'}

__a :Any = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

__a :List[str] = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

__a :List[str] = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}


class _a ( snake_case_ ):
    """simple docstring"""

    _lowerCamelCase : Tuple = VOCAB_FILES_NAMES
    _lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase : int = PRETRAINED_INIT_CONFIGURATION
    _lowerCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase : Union[str, Any] = ConvBertTokenizer

    def __init__( self : Optional[int] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : Union[str, Any]="[PAD]" , UpperCAmelCase : Tuple="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : List[str] , ):
        super().__init__(
            UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )

        A_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , UpperCAmelCase ) != do_lower_case
            or normalizer_state.get("strip_accents" , UpperCAmelCase ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , UpperCAmelCase ) != tokenize_chinese_chars
        ):
            A_ = getattr(UpperCAmelCase , normalizer_state.pop("type" ) )
            A_ = do_lower_case
            A_ = strip_accents
            A_ = tokenize_chinese_chars
            A_ = normalizer_class(**UpperCAmelCase )

        A_ = do_lower_case

    def __A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Dict=None ):
        A_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]

        return output

    def __A ( self : Optional[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
        A_ = [self.sep_token_id]
        A_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
        A_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
        return tuple(UpperCAmelCase )
329
1
from pathlib import Path import fire def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : int ): """simple docstring""" A_ = Path(__UpperCamelCase ) A_ = Path(__UpperCamelCase ) dest_dir.mkdir(exist_ok=__UpperCamelCase ) for path in src_dir.iterdir(): A_ = [x.rstrip() for x in list(path.open().readlines() )][:n] A_ = dest_dir.joinpath(path.name ) print(__UpperCamelCase ) dest_path.open("w" ).write("\n".join(__UpperCamelCase ) ) if __name__ == "__main__": fire.Fire(minify)
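# Usage sketch (paths are hypothetical; assumes the script is saved as
# minify.py): keep only the first 100 lines of every file in src/ and write
# the truncated copies, under the same names, into mini/:
#
#   python minify.py src/ mini/ 100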
329
import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor __a :Optional[Any] = logging.get_logger(__name__) class _a ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ): warnings.warn( "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use VideoMAEImageProcessor instead." , FutureWarning , ) super().__init__(*UpperCAmelCase , **UpperCAmelCase )
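# Migration sketch, not part of the original module: the deprecated class is
# a thin subclass, so call sites only need to change the import.
#
#   from transformers import VideoMAEImageProcessor   # preferred
#   processor = VideoMAEImageProcessor()
#   # instead of VideoMAEFeatureExtractor(), which emits the FutureWarning above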
329
1
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = 'char' _lowerCamelCase : str = 'bpe' _lowerCamelCase : Optional[Any] = 'wp' __a :Union[str, Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Optional[int] = ['image_processor', 'char_tokenizer'] _lowerCamelCase : Any = 'ViTImageProcessor' _lowerCamelCase : Union[str, Any] = 'MgpstrTokenizer' def __init__( self : Tuple , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : List[Any] ): A_ = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , UpperCAmelCase , ) A_ = kwargs.pop("feature_extractor" ) A_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) A_ = tokenizer A_ = AutoTokenizer.from_pretrained("gpt2" ) A_ = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(UpperCAmelCase , UpperCAmelCase ) def __call__( self : int , UpperCAmelCase : Any=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Any ): if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: A_ = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if text is not None: A_ = self.char_tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase ) if text is None: return inputs elif images is None: return encodings else: A_ = encodings["input_ids"] return inputs def __A ( self : str , UpperCAmelCase : int ): A_ , A_ , A_ = sequences A_ = char_preds.size(0 ) A_ , A_ = self._decode_helper(UpperCAmelCase , "char" ) A_ , A_ = self._decode_helper(UpperCAmelCase , "bpe" ) A_ , A_ = self._decode_helper(UpperCAmelCase , "wp" ) A_ = [] A_ = [] for i in range(UpperCAmelCase ): A_ = [char_scores[i], bpe_scores[i], wp_scores[i]] A_ = [char_strs[i], bpe_strs[i], wp_strs[i]] A_ = scores.index(max(UpperCAmelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) A_ = {} A_ = final_strs A_ = final_scores A_ = char_strs A_ = bpe_strs A_ = wp_strs return out def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] ): if format == DecodeType.CHARACTER: A_ = self.char_decode A_ = 1 A_ = "[s]" elif format == DecodeType.BPE: A_ = self.bpe_decode A_ = 2 A_ = "#" elif format == DecodeType.WORDPIECE: A_ = self.wp_decode A_ = 102 A_ = "[SEP]" else: raise ValueError(f'''Format {format} is not supported.''' ) A_ , A_ = [], [] A_ = pred_logits.size(0 ) A_ = pred_logits.size(1 ) A_ , A_ = pred_logits.topk(1 , dim=-1 , largest=UpperCAmelCase , sorted=UpperCAmelCase ) A_ = preds_index.view(-1 , UpperCAmelCase )[:, 1:] A_ = decoder(UpperCAmelCase ) A_ , A_ = torch.nn.functional.softmax(UpperCAmelCase , dim=2 ).max(dim=2 ) A_ = preds_max_prob[:, 1:] for index in range(UpperCAmelCase ): A_ = preds_str[index].find(UpperCAmelCase ) A_ = preds_str[index][:pred_eos] A_ = 
preds_index[index].cpu().tolist() A_ = pred_index.index(UpperCAmelCase ) if eos_token in pred_index else -1 A_ = preds_max_prob[index][: pred_eos_index + 1] A_ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(UpperCAmelCase ) conf_scores.append(UpperCAmelCase ) return dec_strs, conf_scores def __A ( self : str , UpperCAmelCase : List[str] ): A_ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(UpperCAmelCase )] return decode_strs def __A ( self : List[str] , UpperCAmelCase : str ): return self.bpe_tokenizer.batch_decode(UpperCAmelCase ) def __A ( self : Tuple , UpperCAmelCase : Optional[int] ): A_ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(UpperCAmelCase )] return decode_strs
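# Minimal sketch, not from the original file, of the fusion rule used in
# batch_decode above: for each sample, the decoding head (char / bpe /
# wordpiece) with the highest cumulative-probability score wins.
def pick_best_head(strs, scores):
    # strs / scores: per-head candidate strings and confidence scores for a
    # single sample, in the same head order.
    best = max(range(len(scores)), key=lambda i: scores[i])
    return strs[best], scores[best]

assert pick_best_head(["cat", "ca", "c"], [0.91, 0.40, 0.12]) == ("cat", 0.91)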
329
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _a : """simple docstring""" @staticmethod def __A ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Union[str, Any] ): pass @is_pipeline_test @require_vision class _a ( unittest.TestCase ): """simple docstring""" @require_torch def __A ( self : List[str] ): A_ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , ) A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(UpperCAmelCase ) , [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @require_tf def __A ( self : int ): A_ = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" ) A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["a", "b", "c"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}] , ) A_ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], [ {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, {"score": 0.333, "label": ANY(UpperCAmelCase )}, ], ] , ) @slow @require_torch def __A ( self : Any ): A_ = pipeline( 
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , ) # This is an image of 2 cats with remotes and no planes A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , ) @slow @require_tf def __A ( self : Optional[Any] ): A_ = pipeline( task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" ) # This is an image of 2 cats with remotes and no planes A_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) A_ = image_classifier(UpperCAmelCase , candidate_labels=["cat", "plane", "remote"] ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ] , ) A_ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCAmelCase ) , [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5 , )
329
1
from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class _a : """simple docstring""" def __init__( self : str , UpperCAmelCase : Any , ): A_ = parent A_ = 13 A_ = 7 A_ = True A_ = True A_ = False A_ = True A_ = 99 A_ = 32 A_ = 2 A_ = 4 A_ = 37 A_ = "gelu" A_ = 0.1 A_ = 0.1 A_ = 512 A_ = 16 A_ = 2 A_ = 0.02 A_ = 3 A_ = 4 A_ = None def __A ( self : str ): A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_input_mask: A_ = random_attention_mask([self.batch_size, self.seq_length] ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = ids_tensor([self.batch_size] , self.num_choices ) A_ = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple ): A_ = TFDistilBertModel(config=UpperCAmelCase ) A_ = {"input_ids": input_ids, "attention_mask": input_mask} A_ = model(UpperCAmelCase ) A_ = [input_ids, input_mask] A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] ): A_ = TFDistilBertForMaskedLM(config=UpperCAmelCase ) A_ = {"input_ids": input_ids, "attention_mask": input_mask} A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] ): A_ = TFDistilBertForQuestionAnswering(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : str , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : str , 
UpperCAmelCase : int ): A_ = self.num_labels A_ = TFDistilBertForSequenceClassification(UpperCAmelCase ) A_ = {"input_ids": input_ids, "attention_mask": input_mask} A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[Any] ): A_ = self.num_choices A_ = TFDistilBertForMultipleChoice(UpperCAmelCase ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self : str , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str ): A_ = self.num_labels A_ = TFDistilBertForTokenClassification(UpperCAmelCase ) A_ = {"input_ids": input_ids, "attention_mask": input_mask} A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : Tuple ): A_ = self.prepare_config_and_inputs() ((A_) , (A_) , (A_) , (A_) , (A_) , (A_)) = config_and_inputs A_ = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[Any] = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) _lowerCamelCase : Optional[int] = ( { 'feature-extraction': TFDistilBertModel, 'fill-mask': TFDistilBertForMaskedLM, 'question-answering': TFDistilBertForQuestionAnswering, 'text-classification': TFDistilBertForSequenceClassification, 'token-classification': TFDistilBertForTokenClassification, 'zero-shot': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase : Optional[int] = False _lowerCamelCase : Dict = False def __A ( self : Optional[int] ): A_ = TFDistilBertModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , dim=37 ) def __A ( self : List[Any] ): self.config_tester.run_common_tests() def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase ) def __A ( self : Any ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase ) def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase ) def __A ( self : str ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase ) def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase ) def __A ( self : str ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase ) @slow def __A ( self : Optional[int] ): for 
model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): A_ = TFDistilBertModel.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @require_tf class _a ( unittest.TestCase ): """simple docstring""" @slow def __A ( self : List[str] ): A_ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" ) A_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ = model(UpperCAmelCase )[0] A_ = [1, 6, 768] self.assertEqual(output.shape , UpperCAmelCase ) A_ = tf.constant( [ [ [0.19_261_885, -0.13_732_955, 0.4_119_799], [0.22_150_156, -0.07_422_661, 0.39_037_204], [0.22_756_018, -0.0_896_414, 0.3_701_467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
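# Standalone sketch of the integration check above (assumes TensorFlow and
# network access to the distilbert-base-uncased weights):
#
#   model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
#   output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#   assert output.shape == (1, 6, 768)   # [batch, sequence, hidden]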
329
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : Dict=10 ): """simple docstring""" A_ = [] for _ in range(__UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Tuple=10 ): """simple docstring""" A_ = [] for step in range(__UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A_ = os.path.join(__UpperCamelCase ,"schedule.bin" ) torch.save(scheduler.state_dict() ,__UpperCamelCase ) A_ = torch.load(__UpperCamelCase ) scheduler.load_state_dict(__UpperCamelCase ) return lrs @require_torch class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Any , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ): self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ): self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase ) A_ = torch.tensor([0.4, 0.2, -0.5] ) A_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A_ = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): A_ = criterion(UpperCAmelCase , UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def __A ( self : Dict ): A_ = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCAmelCase ) A_ = torch.tensor([0.4, 0.2, -0.5] ) A_ = nn.MSELoss() # No warmup, constant schedule, no gradient clipping A_ = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCAmelCase , weight_decay=0.0 , relative_step=UpperCAmelCase , scale_parameter=UpperCAmelCase , warmup_init=UpperCAmelCase , ) for _ in range(1000 ): A_ = criterion(UpperCAmelCase , UpperCAmelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class _a ( unittest.TestCase ): """simple docstring""" _lowerCamelCase : Optional[int] = nn.Linear(5_0 , 5_0 ) if is_torch_available() else None _lowerCamelCase : Any = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _lowerCamelCase : Any = 1_0 def __A ( self : str , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Dict=None ): self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for a, b in zip(UpperCAmelCase , UpperCAmelCase ): self.assertAlmostEqual(UpperCAmelCase , UpperCAmelCase , delta=UpperCAmelCase , msg=UpperCAmelCase ) def __A ( self : List[Any] ): A_ = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A_ = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A_ , A_ = data A_ = scheduler_func(self.optimizer , **UpperCAmelCase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A_ = unwrap_schedule(UpperCAmelCase , self.num_steps ) self.assertListAlmostEqual( UpperCAmelCase , UpperCAmelCase , tol=1E-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) A_ = scheduler_func(self.optimizer , **UpperCAmelCase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCAmelCase ) # wrap to test picklability of the schedule A_ = unwrap_and_save_reload_schedule(UpperCAmelCase , self.num_steps ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase , msg=f'''failed for {scheduler_func} in save and reload''' ) class _a : """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : List[str] ): A_ = fn def __call__( self : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Optional[Any] ): return self.fn(*UpperCAmelCase , **UpperCAmelCase ) @classmethod def __A ( self : Dict , UpperCAmelCase : List[str] ): A_ = list(map(self , scheduler.lr_lambdas ) )
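# Worked example, not from the original file, of the linear warmup/decay
# schedule that the expectations table above encodes (num_warmup_steps=2,
# num_training_steps=10, base lr 10.0):
def linear_warmup_lr(step, warmup=2, total=10, base=10.0):
    if step < warmup:
        return base * step / warmup  # ramp up: 0.0, 5.0
    return base * max(0.0, (total - step) / (total - warmup))  # linear decay

assert [round(linear_warmup_lr(s), 2) for s in range(10)] == [
    0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25
]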
329
1
from __future__ import annotations def __snake_case ( __UpperCamelCase : dict ,__UpperCamelCase : str ): """simple docstring""" A_ , A_ = set(__UpperCamelCase ), [start] while stack: A_ = stack.pop() explored.add(__UpperCamelCase ) # Differences from BFS: # 1) pop last element instead of first one # 2) add adjacent elements to stack without exploring them for adj in reversed(graph[v] ): if adj not in explored: stack.append(__UpperCamelCase ) return explored __a :int = { 'A': ['B', 'C', 'D'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F'], 'D': ['B', 'D'], 'E': ['B', 'F'], 'F': ['C', 'E', 'G'], 'G': ['F'], } if __name__ == "__main__": import doctest doctest.testmod() print(depth_first_search(G, 'A'))
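# Companion sketch, not in the original file: the BFS variant that the
# comments above contrast against. The only structural change is popping from
# the front of the queue instead of the back of the stack.
from collections import deque

def breadth_first_search(graph: dict, start: str) -> set:
    explored, queue = set(), deque([start])
    while queue:
        v = queue.popleft()  # FIFO instead of LIFO
        explored.add(v)
        for adj in graph[v]:
            if adj not in explored and adj not in queue:
                queue.append(adj)
    return explored  # e.g. breadth_first_search(G, "A") visits all of G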
329
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def __snake_case ( __UpperCamelCase : Optional[int] ): # picklable for multiprocessing """simple docstring""" return x.sum() def __snake_case ( __UpperCamelCase : List[str] ): # picklable for multiprocessing """simple docstring""" return i + 1 @dataclass class _a : """simple docstring""" _lowerCamelCase : int _lowerCamelCase : str class _a ( snake_case_ ): """simple docstring""" def __A ( self : Dict ): A_ = {} A_ = [] A_ = 1 A_ = [1, 2] A_ = {"a": 1, "b": 2} A_ = {"a": [1, 2], "b": [3, 4]} A_ = {"a": {"1": 1}, "b": 2} A_ = {"a": 1, "b": 2, "c": 3, "d": 4} A_ = {} A_ = [] A_ = 2 A_ = [2, 3] A_ = {"a": 2, "b": 3} A_ = {"a": [2, 3], "b": [4, 5]} A_ = {"a": {"1": 2}, "b": 3} A_ = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase ) A_ = 2 self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) A_ = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} A_ = {"a": 2, "b": 0, "c": 2} A_ = { "a": np.eye(2 ).astype(UpperCAmelCase ), "b": np.zeros(3 ).astype(UpperCAmelCase ), "c": np.ones(2 ).astype(UpperCAmelCase ), } self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase , UpperCAmelCase , map_numpy=UpperCAmelCase , num_proc=UpperCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(UpperCAmelCase ): # can't pickle 
a local lambda map_nested(lambda UpperCAmelCase : x + 1 , UpperCAmelCase , num_proc=UpperCAmelCase ) def __A ( self : List[str] ): A_ = {"a": 1, "b": 2} A_ = {"a": 3, "b": 4} A_ = {"a": 5, "b": 6} A_ = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) , UpperCAmelCase ) def __A ( self : Any ): class _a : """simple docstring""" _lowerCamelCase : int = 'bar' A_ = Foo() self.assertEqual(foo.my_attr , "bar" ) with temporary_assignment(UpperCAmelCase , "my_attr" , "BAR" ): self.assertEqual(foo.my_attr , "BAR" ) self.assertEqual(foo.my_attr , "bar" ) @pytest.mark.parametrize( "iterable_length, num_proc, expected_num_proc" ,[ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] ,) def __snake_case ( __UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[Any] ): """simple docstring""" with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch( "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool: A_ = {f'''{i}''': i for i in range(__UpperCamelCase )} A_ = map_nested(lambda __UpperCamelCase : x + 10 ,__UpperCamelCase ,num_proc=__UpperCamelCase ,parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class _a ( snake_case_ ): """simple docstring""" @require_tf def __A ( self : Union[str, Any] ): import tensorflow as tf from tensorflow.keras import layers A_ = layers.Dense(2 ) def gen_random_output(): A_ = tf.random.uniform((1, 3) ) return model(UpperCAmelCase ).numpy() with temp_seed(42 , set_tensorflow=UpperCAmelCase ): A_ = gen_random_output() with temp_seed(42 , set_tensorflow=UpperCAmelCase ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __A ( self : Optional[int] ): import torch def gen_random_output(): A_ = torch.nn.Linear(3 , 2 ) A_ = torch.rand(1 , 3 ) return model(UpperCAmelCase ).detach().numpy() with temp_seed(42 , set_pytorch=UpperCAmelCase ): A_ = gen_random_output() with temp_seed(42 , set_pytorch=UpperCAmelCase ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __A ( self : Any ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): A_ = gen_random_output() with temp_seed(42 ): A_ = gen_random_output() A_ = gen_random_output() np.testing.assert_equal(UpperCAmelCase , UpperCAmelCase ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("input_data" ,[{}] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" A_ = NestedDataStructure(__UpperCamelCase ).data assert output_data == input_data @pytest.mark.parametrize( "data, expected_output" ,[ ({}, []), ([], []), ("foo", ["foo"]), (["foo", "bar"], ["foo", "bar"]), ([["foo", "bar"]], ["foo", "bar"]), ([[["foo"], ["bar"]]], ["foo", "bar"]), ([[["foo"], "bar"]], ["foo", "bar"]), ({"a": 1, "b": 2}, [1, 2]), ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[[3], 
[4]]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]), ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]), ({"a": {"1": 1}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": 2}, [1, 2]), ({"a": {"1": [1]}, "b": [2]}, [1, 2]), ] ,) def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : Any ): """simple docstring""" A_ = NestedDataStructure(__UpperCamelCase ).flatten() assert output == expected_output def __snake_case ( ): """simple docstring""" A_ = A(x=1 ,y="foobar" ) A_ = {"x": 1, "y": "foobar"} assert asdict(__UpperCamelCase ) == expected_output A_ = {"a": {"b": A(x=10 ,y="foo" )}, "c": [A(x=20 ,y="bar" )]} A_ = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(__UpperCamelCase ) == expected_output with pytest.raises(__UpperCamelCase ): asdict([1, A(x=10 ,y="foo" )] ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" return text.split() def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def __snake_case ( ): """simple docstring""" with Pool(2 ) as pool: A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(__UpperCamelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: A_ = list(iflatmap_unordered(__UpperCamelCase ,_split_text ,kwargs_iterable=[{"text": "hello there"}] * 10 ) ) assert out.count("hello" ) == 10 assert out.count("there" ) == 10 assert len(__UpperCamelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: A_ = [] for yield_time, content in iflatmap_unordered( __UpperCamelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"content": "a"}, {"content": "b"}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(__UpperCamelCase ) assert out.count("a" ) == 2 assert out.count("b" ) == 2 assert len(__UpperCamelCase ) == 4
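# Behavior sketch (assumes the `datasets` library; mirrors the map_nested
# assertions above): the function recurses through nested dicts and lists.
#
#   from datasets.utils.py_utils import map_nested
#   map_nested(lambda x: x + 1, {"a": [1, 2], "b": [3, 4]})
#   # -> {"a": [2, 3], "b": [4, 5]}, optionally computed with num_proc workers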
329
1
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging __a :str = logging.get_logger(__name__) # pylint: disable=invalid-name class _a ( snake_case_ ): """simple docstring""" def __init__( self : List[str] , UpperCAmelCase : WhisperForConditionalGeneration , UpperCAmelCase : WhisperProcessor , UpperCAmelCase : AutoencoderKL , UpperCAmelCase : CLIPTextModel , UpperCAmelCase : CLIPTokenizer , UpperCAmelCase : UNetaDConditionModel , UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCAmelCase : StableDiffusionSafetyChecker , UpperCAmelCase : CLIPImageProcessor , ): super().__init__() if safety_checker is None: logger.warning( f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( speech_model=UpperCAmelCase , speech_processor=UpperCAmelCase , vae=UpperCAmelCase , text_encoder=UpperCAmelCase , tokenizer=UpperCAmelCase , unet=UpperCAmelCase , scheduler=UpperCAmelCase , feature_extractor=UpperCAmelCase , ) def __A ( self : Optional[Any] , UpperCAmelCase : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": A_ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCAmelCase ) def __A ( self : List[str] ): self.enable_attention_slicing(UpperCAmelCase ) @torch.no_grad() def __call__( self : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=16000 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 50 , UpperCAmelCase : float = 7.5 , UpperCAmelCase : Optional[Union[str, List[str]]] = None , UpperCAmelCase : Optional[int] = 1 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase : int = 1 , **UpperCAmelCase : int , ): A_ = self.speech_processor.feature_extractor( UpperCAmelCase , return_tensors="pt" , sampling_rate=UpperCAmelCase ).input_features.to(self.device ) A_ = self.speech_model.generate(UpperCAmelCase , max_length=480000 ) A_ = self.speech_processor.tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase , normalize=UpperCAmelCase )[ 0 ] if isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = 1 elif isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = len(UpperCAmelCase ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is 
{type(UpperCAmelCase )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCAmelCase , UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(UpperCAmelCase )}.''' ) # get prompt text embeddings A_ = self.tokenizer( UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , ) A_ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: A_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) A_ = text_input_ids[:, : self.tokenizer.model_max_length] A_ = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method A_ , A_ , A_ = text_embeddings.shape A_ = text_embeddings.repeat(1 , UpperCAmelCase , 1 ) A_ = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. A_ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: A_ = 42 if negative_prompt is None: A_ = [""] * batch_size elif type(UpperCAmelCase ) is not type(UpperCAmelCase ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase )} !=''' f''' {type(UpperCAmelCase )}.''' ) elif isinstance(UpperCAmelCase , UpperCAmelCase ): A_ = [negative_prompt] elif batch_size != len(UpperCAmelCase ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' " the batch size of `prompt`." ) else: A_ = negative_prompt A_ = text_input_ids.shape[-1] A_ = self.tokenizer( UpperCAmelCase , padding="max_length" , max_length=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" , ) A_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method A_ = uncond_embeddings.shape[1] A_ = uncond_embeddings.repeat(1 , UpperCAmelCase , 1 ) A_ = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A_ = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
A_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) A_ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps A_ = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device="cpu" , dtype=UpperCAmelCase ).to( self.device ) else: A_ = torch.randn(UpperCAmelCase , generator=UpperCAmelCase , device=self.device , dtype=UpperCAmelCase ) else: if latents.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) A_ = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(UpperCAmelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand A_ = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler A_ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] A_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) A_ = {} if accepts_eta: A_ = eta for i, t in enumerate(self.progress_bar(UpperCAmelCase ) ): # expand the latents if we are doing classifier free guidance A_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A_ = self.scheduler.scale_model_input(UpperCAmelCase , UpperCAmelCase ) # predict the noise residual A_ = self.unet(UpperCAmelCase , UpperCAmelCase , encoder_hidden_states=UpperCAmelCase ).sample # perform guidance if do_classifier_free_guidance: A_ , A_ = noise_pred.chunk(2 ) A_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 A_ = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = 1 / 0.18_215 * latents A_ = self.vae.decode(UpperCAmelCase ).sample A_ = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 A_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": A_ = self.numpy_to_pil(UpperCAmelCase ) if not return_dict: return image return StableDiffusionPipelineOutput(images=UpperCAmelCase , nsfw_content_detected=UpperCAmelCase )
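# Usage sketch (checkpoint id and audio input are hypothetical; this is a
# community-style pipeline, so the exact loading incantation may differ):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "some/speech-to-image-checkpoint",
#       custom_pipeline="speech_to_image_diffusion",
#   )
#   image = pipe(raw_audio, sampling_rate=16_000, num_inference_steps=50,
#                guidance_scale=7.5).images[0]
#   # Whisper transcribes the audio; the transcript becomes the prompt that
#   # conditions the Stable Diffusion denoising loop above.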
329
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def __snake_case ( __UpperCamelCase : List[Any] ): """simple docstring""" if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def __snake_case ( __UpperCamelCase : str ): """simple docstring""" for char in word: A_ = ord(__UpperCamelCase ) if not _is_chinese_char(__UpperCamelCase ): return 0 return 1 def __snake_case ( __UpperCamelCase : List[str] ): """simple docstring""" A_ = set() for token in tokens: A_ = len(__UpperCamelCase ) > 1 and is_chinese(__UpperCamelCase ) if chinese_word: word_set.add(__UpperCamelCase ) A_ = list(__UpperCamelCase ) return word_list def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens A_ = max([len(__UpperCamelCase ) for w in chinese_word_set] ) A_ = bert_tokens A_ , A_ = 0, len(__UpperCamelCase ) while start < end: A_ = True if is_chinese(bert_word[start] ): A_ = min(end - start ,__UpperCamelCase ) for i in range(__UpperCamelCase ,1 ,-1 ): A_ = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 ,start + i ): A_ = "##" + bert_word[j] A_ = start + i A_ = False break if single_word: start += 1 return bert_word def __snake_case ( __UpperCamelCase : List[str] ,__UpperCamelCase : LTP ,__UpperCamelCase : BertTokenizer ): """simple docstring""" A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = ltp_tokenizer.seg(lines[i : i + 100] )[0] A_ = [get_chinese_word(__UpperCamelCase ) for r in res] ltp_res.extend(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for i in range(0 ,len(__UpperCamelCase ) ,100 ): A_ = bert_tokenizer(lines[i : i + 100] ,add_special_tokens=__UpperCamelCase ,truncation=__UpperCamelCase ,max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) A_ = [] for input_ids, chinese_word in zip(__UpperCamelCase ,__UpperCamelCase ): A_ = [] for id in input_ids: A_ = bert_tokenizer._convert_id_to_token(__UpperCamelCase ) input_tokens.append(__UpperCamelCase ) A_ = add_sub_symbol(__UpperCamelCase ,__UpperCamelCase ) A_ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(__UpperCamelCase ): if token[:2] == "##": A_ = token[2:] # save chinese tokens' pos if len(__UpperCamelCase ) == 1 and _is_chinese_char(ord(__UpperCamelCase ) ): ref_id.append(__UpperCamelCase ) ref_ids.append(__UpperCamelCase ) assert len(__UpperCamelCase ) == len(__UpperCamelCase ) return ref_ids def __snake_case ( __UpperCamelCase : Dict ): """simple docstring""" with open(args.file_name ,"r" ,encoding="utf-8" ) as f: A_ = f.readlines() A_ = [line.strip() for line in data if len(__UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' A_ = LTP(args.ltp ) # faster in GPU device A_ = BertTokenizer.from_pretrained(args.bert ) A_ = prepare_ref(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) with open(args.save_path ,"w" ,encoding="utf-8" ) as f: A_ = [json.dumps(__UpperCamelCase ) + "\n" for ref in ref_ids] f.writelines(__UpperCamelCase ) if __name__ == "__main__": __a :List[Any] = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path' ) parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer') parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res') __a :Dict = parser.parse_args() main(args)
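# Worked example, not from the original file, of the reference ids the script
# emits (toy values): positions of "##"-continuation sub-tokens inside a
# Chinese word.
#
#   tokens  = ["[CLS]", "中", "##国", "人", "[SEP]"]   # after add_sub_symbol
#   ref_ids = [2]   # index of "##国", the continuation of the word "中国"
#   # Whole-word masking later uses these indices to mask "中" and "##国"
#   # together rather than independently.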
329
1
import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __a :List[Any] = pytest.mark.integration __a :str = {'comet'} __a :Any = importlib.util.find_spec('fairseq') is not None __a :Union[str, Any] = {'code_eval'} __a :Optional[Any] = os.name == 'nt' __a :str = {'bertscore', 'frugalscore', 'perplexity'} __a :Tuple = importlib.util.find_spec('transformers') is not None def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" @wraps(__UpperCamelCase ) def wrapper(self : Any ,__UpperCamelCase : Tuple ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("\"test requires Fairseq\"" ) else: test_case(self ,__UpperCamelCase ) return wrapper def __snake_case ( __UpperCamelCase : Union[str, Any] ): """simple docstring""" @wraps(__UpperCamelCase ) def wrapper(self : Tuple ,__UpperCamelCase : List[str] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("\"test requires transformers\"" ) else: test_case(self ,__UpperCamelCase ) return wrapper def __snake_case ( __UpperCamelCase : Optional[Any] ): """simple docstring""" @wraps(__UpperCamelCase ) def wrapper(self : Optional[int] ,__UpperCamelCase : str ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("\"test not supported on Windows\"" ) else: test_case(self ,__UpperCamelCase ) return wrapper def __snake_case ( ): """simple docstring""" A_ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( snake_case_ , snake_case_ , snake_case_ ) @local class _a ( parameterized.TestCase ): """simple docstring""" _lowerCamelCase : str = {} _lowerCamelCase : str = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" ) def __A ( self : Any , UpperCAmelCase : Optional[Any] ): A_ = "[...]" A_ = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , UpperCAmelCase ) ).module_path ) A_ = datasets.load.import_main_class(metric_module.__name__ , dataset=UpperCAmelCase ) # check parameters A_ = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(UpperCAmelCase , metric_module.__name__ ): with self.use_local_metrics(): try: A_ = doctest.testmod(UpperCAmelCase , verbose=UpperCAmelCase , raise_on_error=UpperCAmelCase ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ): A_ = "[...]" A_ = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , UpperCAmelCase ) ).module_path ) # run doctest with self.use_local_metrics(): A_ = doctest.testmod(UpperCAmelCase , verbose=UpperCAmelCase , raise_on_error=UpperCAmelCase ) self.assertEqual(results.failed , 
0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __A ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCAmelCase ): yield else: yield @contextmanager def __A ( self : List[Any] ): def load_local_metric(UpperCAmelCase : int , *UpperCAmelCase : List[str] , **UpperCAmelCase : Tuple ): return load_metric(os.path.join("metrics" , UpperCAmelCase ) , *UpperCAmelCase , **UpperCAmelCase ) with patch("datasets.load_metric" ) as mock_load_metric: A_ = load_local_metric yield @classmethod def __A ( cls : List[Any] , UpperCAmelCase : str ): def wrapper(UpperCAmelCase : Optional[Any] ): A_ = contextmanager(UpperCAmelCase ) A_ = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("bleurt" ) def __snake_case ( __UpperCamelCase : Any ): """simple docstring""" import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv" ,"" ,"" ) # handle pytest cli flags class _a ( snake_case_ ): """simple docstring""" def __A ( self : str , UpperCAmelCase : Union[str, Any] ): assert len(input_dict["input_ids"] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor" ) as mock_create_predictor: A_ = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore" ) def __snake_case ( __UpperCamelCase : int ): """simple docstring""" import torch def bert_cos_score_idf(__UpperCamelCase : List[str] ,__UpperCamelCase : int ,*__UpperCamelCase : Tuple ,**__UpperCamelCase : Tuple ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__UpperCamelCase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model" ), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: A_ = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet" ) def __snake_case ( __UpperCamelCase : str ): """simple docstring""" def load_from_checkpoint(__UpperCamelCase : int ): class _a : """simple docstring""" def __A ( self : Any , UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ): assert len(UpperCAmelCase ) == 2 A_ = [0.19, 0.92] return scores, sum(UpperCAmelCase ) / len(UpperCAmelCase ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model" ) as mock_download_model: A_ = None with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint: A_ = load_from_checkpoint yield def __snake_case ( ): """simple docstring""" A_ = load_metric(os.path.join("metrics" ,"seqeval" ) ) A_ = "ERROR" A_ = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}''' with pytest.raises(__UpperCamelCase ,match=re.escape(__UpperCamelCase ) ): metric.compute(predictions=[] ,references=[] ,scheme=__UpperCamelCase )
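# Pattern sketch (metric and patch-target names are hypothetical): adding a
# new intensive-calls patcher. The registrar above wraps the function in
# contextmanager itself, so the patcher just patches and yields:
#
#   @LocalMetricTest.register_intensive_calls_patcher("my_metric")
#   def _patch_my_metric(module_name):
#       with patch("my_metric.heavy_model_call") as mocked:
#           mocked.return_value = [0.5]
#           yield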
329
import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def __snake_case ( __UpperCamelCase : Features ): """simple docstring""" A_ = np.inf def set_batch_size(__UpperCamelCase : FeatureType ) -> None: nonlocal batch_size if isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and feature.dtype == "binary": A_ = min(__UpperCamelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__UpperCamelCase ,__UpperCamelCase ) return None if batch_size is np.inf else batch_size class _a ( snake_case_ ): """simple docstring""" def __init__( self : Tuple , UpperCAmelCase : NestedDataStructureLike[PathLike] , UpperCAmelCase : Optional[NamedSplit] = None , UpperCAmelCase : Optional[Features] = None , UpperCAmelCase : str = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : Tuple , ): super().__init__( UpperCAmelCase , split=UpperCAmelCase , features=UpperCAmelCase , cache_dir=UpperCAmelCase , keep_in_memory=UpperCAmelCase , streaming=UpperCAmelCase , num_proc=UpperCAmelCase , **UpperCAmelCase , ) A_ = path_or_paths if isinstance(UpperCAmelCase , UpperCAmelCase ) else {self.split: path_or_paths} A_ = _PACKAGED_DATASETS_MODULES["parquet"][1] A_ = Parquet( cache_dir=UpperCAmelCase , data_files=UpperCAmelCase , features=UpperCAmelCase , hash=UpperCAmelCase , **UpperCAmelCase , ) def __A ( self : Optional[Any] ): # Build iterable dataset if self.streaming: A_ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A_ = None A_ = None A_ = None A_ = None self.builder.download_and_prepare( download_config=UpperCAmelCase , download_mode=UpperCAmelCase , verification_mode=UpperCAmelCase , base_path=UpperCAmelCase , num_proc=self.num_proc , ) A_ = self.builder.as_dataset( split=self.split , verification_mode=UpperCAmelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Any , UpperCAmelCase : Dataset , UpperCAmelCase : Union[PathLike, BinaryIO] , UpperCAmelCase : Optional[int] = None , **UpperCAmelCase : List[Any] , ): A_ = dataset A_ = path_or_buf A_ = batch_size or get_writer_batch_size(dataset.features ) A_ = parquet_writer_kwargs def __A ( self : int ): A_ = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , "wb+" ) as buffer: A_ = self._write(file_obj=UpperCAmelCase , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs ) else: A_ = self._write(file_obj=self.path_or_buf , batch_size=UpperCAmelCase , **self.parquet_writer_kwargs ) return written def __A ( self : Tuple , UpperCAmelCase : BinaryIO , UpperCAmelCase : int , **UpperCAmelCase : Optional[Any] ): A_ = 0 A_ = parquet_writer_kwargs.pop("path_or_buf" , 
None ) A_ = self.dataset.features.arrow_schema A_ = pq.ParquetWriter(UpperCAmelCase , schema=UpperCAmelCase , **UpperCAmelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , UpperCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ): A_ = query_table( table=self.dataset._data , key=slice(UpperCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(UpperCAmelCase ) written += batch.nbytes writer.close() return written
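# A self-contained sketch of the batched-write technique used by _write above,
# assuming only pyarrow is installed: stream an Arrow table to Parquet in
# fixed-size slices instead of materializing one giant batch. The file name
# and batch size below are illustrative.
if __name__ == "__main__":
    import pyarrow as pa
    import pyarrow.parquet as pq

    table = pa.table({"ids": list(range(10))})
    batch_size = 4
    writer = pq.ParquetWriter("example.parquet", schema=table.schema)
    for offset in range(0, len(table), batch_size):
        writer.write_table(table.slice(offset, batch_size))  # one write per slice
    writer.close()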
329
1
import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class _a ( snake_case_ ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase : Dict=0.01 , UpperCAmelCase : Optional[int]=1000 ): A_ = p_stop A_ = max_length def __iter__( self : Any ): A_ = 0 A_ = False while not stop and count < self.max_length: yield count count += 1 A_ = random.random() < self.p_stop class _a ( unittest.TestCase ): """simple docstring""" def __A ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : int=True ): A_ = [ BatchSamplerShard(UpperCAmelCase , 2 , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase ) for i in range(2 ) ] A_ = [list(UpperCAmelCase ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(UpperCAmelCase ) for shard in batch_sampler_shards] , [len(UpperCAmelCase ) for e in expected] ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : str ): # Check the shards when the dataset is a round multiple of total batch size. A_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase ) A_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. A_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase ) A_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. A_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase ) A_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. 
A_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase ) A_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase ) # Check the shards when the dataset is very small. A_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase ) A_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[str] ): # Check the shards when the dataset is a round multiple of batch size. A_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase ) A_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. A_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase ) A_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. A_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase ) A_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase ) # Check the shards when the dataset is very small. A_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase ) A_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase ) def __A ( self : Tuple ): # Check the shards when the dataset is a round multiple of total batch size. 
A_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase ) A_ = BatchSampler(range(24 ) , batch_size=3 , drop_last=UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. A_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase ) A_ = BatchSampler(range(21 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. A_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase ) A_ = BatchSampler(range(22 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. A_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase ) A_ = BatchSampler(range(20 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase ) # Check the shards when the dataset is very small. A_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase ) A_ = BatchSampler(range(2 ) , batch_size=3 , drop_last=UpperCAmelCase ) A_ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , even_batches=UpperCAmelCase ) def __A ( self : Tuple ): # Check the shards when the dataset is a round multiple of batch size. A_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase ) A_ = BatchSampler(range(24 ) , batch_size=4 , drop_last=UpperCAmelCase ) # Expected shouldn't change self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size. 
A_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase ) A_ = BatchSampler(range(22 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. A_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase ) A_ = BatchSampler(range(21 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase ) # Check the shards when the dataset is very small. A_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [[[0, 1]], []] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase ) A_ = BatchSampler(range(2 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = [[], []] self.check_batch_sampler_shards(UpperCAmelCase , UpperCAmelCase , split_batches=UpperCAmelCase , even_batches=UpperCAmelCase ) def __A ( self : Union[str, Any] ): A_ = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] A_ = [BatchSamplerShard(UpperCAmelCase , 2 , UpperCAmelCase , even_batches=UpperCAmelCase ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def __A ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple=False , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : List[str]=False ): random.seed(UpperCAmelCase ) A_ = list(UpperCAmelCase ) A_ = [ IterableDatasetShard( UpperCAmelCase , batch_size=UpperCAmelCase , drop_last=UpperCAmelCase , num_processes=UpperCAmelCase , process_index=UpperCAmelCase , split_batches=UpperCAmelCase , ) for i in range(UpperCAmelCase ) ] A_ = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. 
random.seed(UpperCAmelCase ) iterable_dataset_lists.append(list(UpperCAmelCase ) ) A_ = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size A_ = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) self.assertTrue(len(UpperCAmelCase ) % shard_batch_size == 0 ) A_ = [] for idx in range(0 , len(UpperCAmelCase ) , UpperCAmelCase ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(UpperCAmelCase ) < len(UpperCAmelCase ): reference += reference self.assertListEqual(UpperCAmelCase , reference[: len(UpperCAmelCase )] ) def __A ( self : Optional[int] ): A_ = 42 A_ = RandomIterableDataset() self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase ) self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase ) self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase ) self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase ) # Edge case with a very small dataset A_ = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase ) self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase ) self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase ) self.check_iterable_dataset_shards(UpperCAmelCase , UpperCAmelCase , batch_size=4 , drop_last=UpperCAmelCase , split_batches=UpperCAmelCase ) def __A ( self : int ): A_ = BatchSampler(range(16 ) , batch_size=4 , drop_last=UpperCAmelCase ) A_ = SkipBatchSampler(UpperCAmelCase , 2 ) self.assertListEqual(list(UpperCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def __A ( self : Optional[Any] ): A_ = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def __A ( self : Optional[int] ): A_ = DataLoader(list(range(16 ) ) , batch_size=4 ) A_ = skip_first_batches(UpperCAmelCase , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def __A ( self : str ): A_ = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def __A ( self : Dict ): Accelerator() A_ = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(UpperCAmelCase ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
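# A minimal runnable sketch of the sharding behaviour exercised above, reusing
# the BatchSampler and BatchSamplerShard imports from the top of this file:
# one batch sampler is split across two processes, and each shard yields every
# other batch.
if __name__ == "__main__":
    sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
    shards = [BatchSamplerShard(sampler, num_processes=2, process_index=i) for i in range(2)]
    print([list(s) for s in shards])  # [[[0, 1], [4, 5]], [[2, 3], [6, 7]]]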
329
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
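# A quick sanity-check sketch for the helpers above: rotating by 90 degrees
# four times (or by 180 degrees twice) reproduces the original matrix.
if __name__ == "__main__":
    m = make_matrix()
    r = m
    for _ in range(4):
        r = rotate_90(r)
    assert r == m
    assert rotate_180(rotate_180(m)) == m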
329
1
def gray_code(bit_count: int) -> list[int]:
    if bit_count < 0:
        raise ValueError("The given input must be non-negative")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list[str]:
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
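# Example (a sketch): the 2-bit Gray code produced by the functions above;
# successive values differ in exactly one bit.
if __name__ == "__main__":
    print(gray_code_sequence_string(2))  # ['00', '01', '11', '10']
    print(gray_code(2))  # [0, 1, 3, 2]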
329
from ..utils import DummyObject, requires_backends class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Tuple , **UpperCAmelCase : Union[str, Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = ['torch', 'transformers', 'onnx'] def __init__( self : Optional[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Any = ['torch', 'transformers', 'onnx'] def __init__( self : Dict , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[int] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Tuple , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[str] = ['torch', 'transformers', 'onnx'] def __init__( self : List[Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : int ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Any , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : int ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : Dict = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : int , **UpperCAmelCase : Tuple ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : str , **UpperCAmelCase : Dict ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : int , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : List[str] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) class _a ( metaclass=snake_case_ ): """simple docstring""" _lowerCamelCase : List[Any] = ['torch', 'transformers', 'onnx'] def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : List[Any] ): requires_backends(self , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : List[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ): requires_backends(cls , ["torch", "transformers", "onnx"] ) @classmethod def __A ( cls : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : int ): 
requires_backends(cls , ["torch", "transformers", "onnx"] )
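# A short usage note (a sketch; every placeholder class above is shown under the
# anonymized name `_a`): these dummies let the package import successfully when
# the optional backends are absent. Instantiating one, or calling either of its
# classmethods, goes through requires_backends, which raises an ImportError
# naming the missing packages ("torch", "transformers", "onnx") instead of
# letting the failure surface later as an opaque AttributeError.
#
# try:
#     obj = _a()  # any of the dummy classes above
# except ImportError as err:
#     print(err)  # lists the backends that need to be installed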
329
1
import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __a :int = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class _a : """simple docstring""" _lowerCamelCase : Dict = PegasusConfig _lowerCamelCase : Dict = {} _lowerCamelCase : List[str] = 'gelu' def __init__( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : int=13 , UpperCAmelCase : Optional[int]=7 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : str=99 , UpperCAmelCase : Tuple=32 , UpperCAmelCase : str=5 , UpperCAmelCase : Any=4 , UpperCAmelCase : Optional[Any]=37 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=0.1 , UpperCAmelCase : Union[str, Any]=20 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : Dict=1 , UpperCAmelCase : Dict=0 , ): A_ = parent A_ = batch_size A_ = seq_length A_ = is_training A_ = use_labels A_ = vocab_size A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = max_position_embeddings A_ = eos_token_id A_ = pad_token_id A_ = bos_token_id def __A ( self : int ): A_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) A_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) A_ = np.concatenate([input_ids, eos_tensor] , axis=1 ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A_ = prepare_pegasus_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return config, inputs_dict def __A ( self : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ): A_ = 20 A_ = model_class_name(UpperCAmelCase ) A_ = model.encode(inputs_dict["input_ids"] ) A_ , A_ = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) A_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase ) A_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) A_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) A_ = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , 
decoder_position_ids=UpperCAmelCase , ) A_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) A_ = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase , ) A_ = model.decode(UpperCAmelCase , UpperCAmelCase ) A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) def __A ( self : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ): A_ = 20 A_ = model_class_name(UpperCAmelCase ) A_ = model.encode(inputs_dict["input_ids"] ) A_ , A_ = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) A_ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) A_ = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase , UpperCAmelCase ) A_ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) A_ = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) A_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) A_ = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase , decoder_position_ids=UpperCAmelCase , ) A_ = model.decode(UpperCAmelCase , UpperCAmelCase , decoder_attention_mask=UpperCAmelCase ) A_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''' ) def __snake_case ( __UpperCamelCase : Tuple ,__UpperCamelCase : str ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Dict=None ,__UpperCamelCase : Any=None ,): """simple docstring""" if attention_mask is None: A_ = np.not_equal(__UpperCamelCase ,config.pad_token_id ).astype(np.int8 ) if decoder_attention_mask is None: A_ = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape ,dtype=np.int8 ), np.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ).astype(np.int8 ), ] ,axis=-1 ,) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : List[str] = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) _lowerCamelCase : int = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () _lowerCamelCase : Dict = True _lowerCamelCase : Union[str, Any] = False _lowerCamelCase : Optional[Any] = False _lowerCamelCase : List[str] = False def __A ( self : Tuple ): A_ = FlaxPegasusModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase ) def __A ( self : str ): self.config_tester.run_common_tests() def __A ( self : List[Any] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def __A ( self : Optional[int] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for
model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def __A ( self : List[str] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) A_ = model_class(UpperCAmelCase ) @jax.jit def encode_jitted(UpperCAmelCase : Optional[Any] , UpperCAmelCase : str=None , **UpperCAmelCase : str ): return model.encode(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase ) with self.subTest("JIT Enabled" ): A_ = encode_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): A_ = encode_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def __A ( self : Optional[int] ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): A_ = model_class(UpperCAmelCase ) A_ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) A_ = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] ): return model.decode( decoder_input_ids=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , encoder_outputs=UpperCAmelCase , ) with self.subTest("JIT Enabled" ): A_ = decode_jitted(**UpperCAmelCase ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): A_ = decode_jitted(**UpperCAmelCase ).to_tuple() self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) ) for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __A ( self : List[str] ): for model_class_name in self.all_model_classes: A_ = model_class_name.from_pretrained("google/pegasus-large" , from_pt=UpperCAmelCase ) A_ = np.ones((1, 1) ) A_ = model(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @slow def __A ( self : Dict ): A_ = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" ) A_ = PegasusTokenizer.from_pretrained("google/pegasus-xsum" ) A_ = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ", ] A_ = [ "California's largest electricity provider has turned off power to hundreds of thousands of customers.", "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.", ] A_ = tokenizer(UpperCAmelCase , return_tensors="np" , truncation=UpperCAmelCase , max_length=512 , padding=UpperCAmelCase ) A_ = model.generate(**UpperCAmelCase , num_beams=2 ).sequences A_ = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase ) assert tgt_text == decoded
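# A self-contained numpy sketch of the default decoder_attention_mask built in
# prepare_pegasus_inputs_dict above: the first (decoder-start) position is
# always attended, and later positions are masked wherever they equal
# pad_token_id. The token values below are illustrative.
if __name__ == "__main__":
    pad_token_id = 0
    decoder_input_ids = np.array([[2, 5, 6, 0, 0]])
    decoder_attention_mask = np.concatenate(
        [
            np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
            np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
        ],
        axis=-1,
    )
    print(decoder_attention_mask)  # [[1 1 1 0 0]]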
329
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
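# Example sketch: the primality test and generator above in action.
if __name__ == "__main__":
    print(list(itertools.islice(prime_generator(), 6)))  # [2, 3, 5, 7, 11, 13]
    print(solution(6))  # 13, the 6th prime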
329
1
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
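# Example sketch: nearest-neighbour lookup and cosine similarity with the
# helpers above; the arrays are illustrative.
if __name__ == "__main__":
    dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    value_array = np.array([[0.0, 1.0]])
    print(similarity_search(dataset, value_array))  # [[[0.0, 0.0], 1.0]]
    print(cosine_similarity(np.array([1.0, 2.0]), np.array([6.0, 32.0])))  # ~0.9615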
329
from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class _a : """simple docstring""" def __init__( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=13 , UpperCAmelCase : Tuple=7 , UpperCAmelCase : int=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[Any]=99 , UpperCAmelCase : str=32 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : int=16 , UpperCAmelCase : Any=2 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : List[Any]=None , ): A_ = parent A_ = 13 A_ = 7 A_ = True A_ = True A_ = True A_ = True A_ = 99 A_ = 384 A_ = 2 A_ = 4 A_ = 37 A_ = "gelu" A_ = 0.1 A_ = 0.1 A_ = 512 A_ = 16 A_ = 2 A_ = 0.02 A_ = 3 A_ = 4 A_ = 128 A_ = 2 A_ = 9 A_ = 1 A_ = None def __A ( self : Optional[int] ): A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ = None if self.use_input_mask: A_ = random_attention_mask([self.batch_size, self.seq_length] ) A_ = None if self.use_token_type_ids: A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ = None A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ = ids_tensor([self.batch_size] , self.num_choices ) A_ = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ): A_ = TFConvBertModel(config=UpperCAmelCase ) A_ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} A_ = [input_ids, input_mask] A_ = model(UpperCAmelCase ) A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : 
Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Tuple ): A_ = TFConvBertForMaskedLM(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : int ): A_ = self.num_labels A_ = TFConvBertForSequenceClassification(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ): A_ = self.num_choices A_ = TFConvBertForMultipleChoice(config=UpperCAmelCase ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) A_ = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str ): A_ = self.num_labels A_ = TFConvBertForTokenClassification(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ): A_ = TFConvBertForQuestionAnswering(config=UpperCAmelCase ) A_ = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } A_ = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self : List[str] ): A_ = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) = config_and_inputs A_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _a ( snake_case_ , snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) _lowerCamelCase : Any = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': 
TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) _lowerCamelCase : Dict = False _lowerCamelCase : Optional[int] = False _lowerCamelCase : Dict = False def __A ( self : List[str] ): A_ = TFConvBertModelTester(self ) A_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def __A ( self : Tuple ): self.config_tester.run_common_tests() def __A ( self : Tuple ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase ) def __A ( self : Dict ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase ) def __A ( self : int ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase ) def __A ( self : List[Any] ): A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase ) @slow def __A ( self : str ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True A_ = True if hasattr(UpperCAmelCase , "use_cache" ): A_ = True A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) for model_class in self.all_model_classes: A_ = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) A_ = model_class(UpperCAmelCase ) A_ = len(model(UpperCAmelCase ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase ) A_ = os.path.join(UpperCAmelCase , "saved_model" , "1" ) A_ = tf.keras.models.load_model(UpperCAmelCase ) A_ = model(UpperCAmelCase ) if self.is_encoder_decoder: A_ = outputs["encoder_hidden_states"] A_ = outputs["encoder_attentions"] else: A_ = outputs["hidden_states"] A_ = outputs["attentions"] self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) A_ = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __A ( self : List[str] ): A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) self.assertIsNotNone(UpperCAmelCase ) def __A ( self : Any ): A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True A_ = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) A_ = getattr(self.model_tester , "key_length" , UpperCAmelCase ) def check_decoder_attentions_output(UpperCAmelCase : Optional[int] ): A_ = len(UpperCAmelCase ) self.assertEqual(out_len % 2 , 0 ) A_ = 
outputs.decoder_attentions self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(UpperCAmelCase : Optional[Any] ): A_ = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: A_ = True A_ = False A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) A_ = len(UpperCAmelCase ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) if self.is_encoder_decoder: A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_decoder_attentions_output(UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) # Check attention is always last and order is fine A_ = True A_ = True A_ = model_class(UpperCAmelCase ) A_ = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , UpperCAmelCase ) check_encoder_attentions_output(UpperCAmelCase ) @require_tf class _a ( unittest.TestCase ): """simple docstring""" @slow def __A ( self : Dict ): A_ = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" ) A_ = tf.constant([[0, 1, 2, 3, 4, 5]] ) A_ = model(UpperCAmelCase )[0] A_ = [1, 6, 768] self.assertEqual(output.shape , UpperCAmelCase ) A_ = tf.constant( [ [ [-0.03_475_493, -0.4_686_034, -0.30_638_832], [0.22_637_248, -0.26_988_646, -0.7_423_424], [0.10_324_868, -0.45_013_508, -0.58_280_784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 )
329
1