code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" snake_case__ = XLNetTokenizer snake_case__ = XLNetTokenizerFast snake_case__ = True snake_case__ = True def a ( self : str ) -> str: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def a ( self : List[str] ) -> List[Any]: lowerCAmelCase__ = "<s>" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def a ( self : Union[str, Any] ) -> str: lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<eod>" ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 1_006 ) def a ( self : int ) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def a ( self : List[str] ) -> Any: lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = tokenizer.tokenize("This is a test" ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [285, 46, 10, 170, 382] ) lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def a ( self : Optional[int] ) -> Optional[Any]: lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + "", "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] ) def a ( self : List[Any] ) -> Optional[int]: lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "se", ".", ] , ) @slow def a ( self : Any ) -> Any: lowerCAmelCase__ = XLNetTokenizer.from_pretrained("xlnet-base-cased" ) lowerCAmelCase__ = tokenizer.encode("sequence builders" , add_special_tokens=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = tokenizer.encode("multi-sequence build" , add_special_tokens=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def a ( self : Union[str, Any] ) -> Any: # fmt: off lowerCAmelCase__ = {"input_ids": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
61
import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase__ = json.loads(open(SCREAMING_SNAKE_CASE ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('''.pt''' ): lowercase__ = args.output + '''.pt''' lowercase__ = OrderedDict() with tf.device('''/CPU:0''' ): lowercase__ = tf.train.load_checkpoint(args.tf_model_dir ) lowercase__ = reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase__ = reader.get_tensor(SCREAMING_SNAKE_CASE ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase__ = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase__ = 8 lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/moe''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase__ = key_name[-9:-7] for i in range(16 ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase__ = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/mlp''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p1/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/ln''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/att''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase__ = state[:, 0, :, :] lowercase__ = state[:, 1, :, :] lowercase__ = state[:, 2, :, :] lowercase__ = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/o/kernel''' ): lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase__ = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/an''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase__ = '''model.%s.weight''' % nlayer lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) if key_name.startswith('''model/wte''' ): lowercase__ = '''lm_head.weight''' lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/wob''' ): lowercase__ = '''final_logits_bias''' lowercase__ = vnp.copy() # same in embedded lowercase__ = state.reshape((1, -1) ) lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense/kernel": lowercase__ = '''model.last_project.weight''' lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense_1/bias": lowercase__ = '''model.last_project.bias''' lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) torch.save(SCREAMING_SNAKE_CASE , args.output ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser( description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model') parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model') lowerCAmelCase = parser.parse_args() convert_tf_gptsan_to_pt(args)
43
0
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : int = ArgumentParser( description=( "PyTorch TPU distributed training launch " "helper utility that will spawn up " "multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=lowercase , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=lowercase , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=lowercase ) return parser.parse_args() def lowerCamelCase__ ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = parse_args() # Import training_script as a module. SCREAMING_SNAKE_CASE : Optional[Any] = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) SCREAMING_SNAKE_CASE : List[Any] = script_fpath.stem SCREAMING_SNAKE_CASE : List[str] = importlib.import_module(lowercase ) # Patch sys.argv SCREAMING_SNAKE_CASE : List[str] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
62
from __future__ import annotations def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar a : Dict = TypeVar("T") def lowerCamelCase__ ( __lowerCamelCase : int ): return (position - 1) // 2 def lowerCamelCase__ ( __lowerCamelCase : int ): return (2 * position) + 1 def lowerCamelCase__ ( __lowerCamelCase : int ): return (2 * position) + 2 class a ( Generic[T] ): """simple docstring""" def __init__( self : List[Any] ) -> None: __UpperCAmelCase : list[tuple[T, int]] = [] __UpperCAmelCase : dict[T, int] = {} __UpperCAmelCase : int = 0 def __len__( self : Optional[int] ) -> int: return self.elements def __repr__( self : str ) -> str: return str(self.heap ) def UpperCAmelCase ( self : Dict ) -> bool: # Check if the priority queue is empty return self.elements == 0 def UpperCAmelCase ( self : Union[str, Any] , __lowercase : T , __lowercase : int ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) __UpperCAmelCase : Any = self.elements self.elements += 1 self._bubble_up(__lowercase ) def UpperCAmelCase ( self : Tuple ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __UpperCAmelCase , __UpperCAmelCase : Dict = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __UpperCAmelCase , __UpperCAmelCase : List[str] = self.heap[0] self._bubble_down(__lowercase ) return elem def UpperCAmelCase ( self : str , __lowercase : T , __lowercase : int ) -> None: # Update the weight of the given key __UpperCAmelCase : Optional[int] = self.position_map[elem] __UpperCAmelCase : Union[str, Any] = (elem, weight) if position > 0: __UpperCAmelCase : Dict = get_parent_position(__lowercase ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__lowercase ) else: self._bubble_down(__lowercase ) else: self._bubble_down(__lowercase ) def UpperCAmelCase ( self : Union[str, Any] , __lowercase : T ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] __UpperCAmelCase : List[str] = self.position_map[elem] if curr_pos == 0: return None __UpperCAmelCase : Dict = get_parent_position(__lowercase ) __UpperCAmelCase , __UpperCAmelCase : Dict = self.heap[curr_pos] __UpperCAmelCase , __UpperCAmelCase : Dict = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__lowercase , __lowercase ) return self._bubble_up(__lowercase ) return None def UpperCAmelCase ( self : str , __lowercase : T ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] __UpperCAmelCase : Optional[Any] = self.position_map[elem] __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.heap[curr_pos] __UpperCAmelCase : Tuple = get_child_left_position(__lowercase ) __UpperCAmelCase : Tuple = get_child_right_position(__lowercase ) if child_left_position < self.elements and child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase : Tuple = self.heap[child_left_position] __UpperCAmelCase , __UpperCAmelCase : List[Any] = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__lowercase , __lowercase ) return self._bubble_down(__lowercase ) if child_left_position < self.elements: __UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__lowercase , __lowercase ) return self._bubble_down(__lowercase ) else: return None if child_right_position < self.elements: __UpperCAmelCase , __UpperCAmelCase : Any = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__lowercase , __lowercase ) return self._bubble_down(__lowercase ) return None def UpperCAmelCase ( self : Any , __lowercase : int , __lowercase : int ) -> None: # Swap the nodes at the given positions __UpperCAmelCase : Union[str, Any] = self.heap[nodea_pos][0] __UpperCAmelCase : Optional[int] = self.heap[nodea_pos][0] __UpperCAmelCase , __UpperCAmelCase : List[Any] = ( self.heap[nodea_pos], self.heap[nodea_pos], ) __UpperCAmelCase : Union[str, Any] = nodea_pos __UpperCAmelCase : List[str] = nodea_pos class a ( Generic[T] ): """simple docstring""" def __init__( self : Union[str, Any] ) -> None: __UpperCAmelCase : dict[T, dict[T, int]] = {} __UpperCAmelCase : int = 0 def __repr__( self : List[str] ) -> str: return str(self.connections ) def __len__( self : Any ) -> int: return self.nodes def UpperCAmelCase ( self : Optional[Any] , __lowercase : T ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: __UpperCAmelCase : Optional[int] = {} self.nodes += 1 def UpperCAmelCase ( self : List[str] , __lowercase : T , __lowercase : T , __lowercase : int ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__lowercase ) self.add_node(__lowercase ) __UpperCAmelCase : Tuple = weight __UpperCAmelCase : Union[str, Any] = weight def lowerCamelCase__ ( __lowerCamelCase : GraphUndirectedWeighted[T] , ): __UpperCAmelCase : dict[T, int] = {node: maxsize for node in graph.connections} __UpperCAmelCase : dict[T, T | None] = {node: None for node in graph.connections} __UpperCAmelCase : MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(__lowerCamelCase , __lowerCamelCase ) if priority_queue.is_empty(): return dist, parent # initialization __UpperCAmelCase : Any = priority_queue.extract_min() __UpperCAmelCase : Dict = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase : Union[str, Any] = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__lowerCamelCase , dist[neighbour] ) __UpperCAmelCase : List[Any] = node # running prim's algorithm while not priority_queue.is_empty(): __UpperCAmelCase : Optional[Any] = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __UpperCAmelCase : Optional[Any] = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__lowerCamelCase , dist[neighbour] ) __UpperCAmelCase : List[Any] = node return dist, parent
63
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
43
0
import numpy as np def A__ ( snake_case_ : str , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Optional[int] ): SCREAMING_SNAKE_CASE__: List[Any]= int(np.ceil((x_end - xa) / h ) ) SCREAMING_SNAKE_CASE__: Any= np.zeros((n + 1,) ) SCREAMING_SNAKE_CASE__: int= ya SCREAMING_SNAKE_CASE__: Tuple= xa for k in range(snake_case_ ): SCREAMING_SNAKE_CASE__: Any= f(snake_case_ , y[k] ) SCREAMING_SNAKE_CASE__: Optional[int]= f(x + 0.5 * h , y[k] + 0.5 * h * ka ) SCREAMING_SNAKE_CASE__: Tuple= f(x + 0.5 * h , y[k] + 0.5 * h * ka ) SCREAMING_SNAKE_CASE__: List[str]= f(x + h , y[k] + h * ka ) SCREAMING_SNAKE_CASE__: Tuple= y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
64
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() def lowerCamelCase_ ( self: Dict ) -> Tuple: """simple docstring""" lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting''' lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ ) lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench''' lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = num_samples * [init_image] lowercase__ = num_samples * [mask_image] lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # shard inputs and rng lowercase__ = replicate(UpperCamelCase_ ) lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = pipeline( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ) lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 ) lowercase__ = images[0, 253:256, 253:256, -1] lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
43
0
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
65
from __future__ import annotations import math def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) return min( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) def _a ( ): """simple docstring""" lowercase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] lowercase__ = math.log(len(SCREAMING_SNAKE_CASE ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
43
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=1_8 , _lowerCAmelCase=3_0 , _lowerCAmelCase=4_0_0 , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=None , _lowerCAmelCase=True , ): _lowercase : Optional[Any] = size if size is not None else {'shortest_edge': 2_0} _lowercase : List[str] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} _lowercase : Optional[Any] = parent _lowercase : List[Any] = batch_size _lowercase : Tuple = num_channels _lowercase : Optional[Any] = image_size _lowercase : str = min_resolution _lowercase : Optional[Any] = max_resolution _lowercase : Union[str, Any] = do_resize _lowercase : List[Any] = size _lowercase : List[str] = do_center_crop _lowercase : Tuple = crop_size _lowercase : List[Any] = do_flip_channel_order def __a ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class lowerCAmelCase_ ( __snake_case , unittest.TestCase ): _UpperCamelCase : Optional[int] = MobileViTImageProcessor if is_vision_available() else None def __a ( self ): _lowercase : List[str] = MobileViTImageProcessingTester(self ) @property def __a ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __a ( self ): _lowercase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'do_center_crop' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'center_crop' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'do_flip_channel_order' ) ) def __a ( self ): _lowercase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 2_0} ) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} ) _lowercase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'shortest_edge': 4_2} ) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} ) def __a ( self ): pass def __a ( self ): # Initialize image_processing _lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , Image.Image ) # Test not batched input _lowercase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowercase : List[str] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def __a ( self ): # Initialize image_processing _lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowercase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , np.ndarray ) # Test not batched input _lowercase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowercase : Union[str, Any] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def __a ( self ): # Initialize image_processing _lowercase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , torch.Tensor ) # Test not batched input _lowercase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowercase : List[Any] = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
66
class _a : def __init__( self: Tuple , UpperCamelCase_: Dict ) -> List[str]: """simple docstring""" lowercase__ = val lowercase__ = None lowercase__ = None def lowerCamelCase_ ( self: Any , UpperCamelCase_: Any ) -> Union[str, Any]: """simple docstring""" if self.val: if val < self.val: if self.left is None: lowercase__ = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: lowercase__ = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: lowercase__ = val def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if root: inorder(root.left , SCREAMING_SNAKE_CASE ) res.append(root.val ) inorder(root.right , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if len(SCREAMING_SNAKE_CASE ) == 0: return arr lowercase__ = Node(arr[0] ) for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): root.insert(arr[i] ) # Traverse BST in order. lowercase__ = [] inorder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
43
0
def SCREAMING_SNAKE_CASE__ ( snake_case__ :int = 3 , snake_case__ :int = 7 , snake_case__ :int = 100_0000 ) -> int: _lowercase = 0 _lowercase = 1 for current_denominator in range(1 , limit + 1 ): _lowercase = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: _lowercase = current_numerator _lowercase = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
67
lowerCAmelCase = { 'a': 'AAAAA', 'b': 'AAAAB', 'c': 'AAABA', 'd': 'AAABB', 'e': 'AABAA', 'f': 'AABAB', 'g': 'AABBA', 'h': 'AABBB', 'i': 'ABAAA', 'j': 'BBBAA', 'k': 'ABAAB', 'l': 'ABABA', 'm': 'ABABB', 'n': 'ABBAA', 'o': 'ABBAB', 'p': 'ABBBA', 'q': 'ABBBB', 'r': 'BAAAA', 's': 'BAAAB', 't': 'BAABA', 'u': 'BAABB', 'v': 'BBBAB', 'w': 'BABAA', 'x': 'BABAB', 'y': 'BABBA', 'z': 'BABBB', ' ': ' ', } lowerCAmelCase = {value: key for key, value in encode_dict.items()} def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = '''''' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('''encode() accepts only letters of the alphabet and spaces''' ) return encoded def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if set(SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' ) lowercase__ = '''''' for word in coded.split(): while len(SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase__ = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
43
0
import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase__ ( ) -> List[str]: """simple docstring""" __UpperCAmelCase =torch.nn.Linear(2 , 4 ) __UpperCAmelCase =torch.optim.AdamW(model.parameters() , lr=1.0 ) __UpperCAmelCase =torch.optim.lr_scheduler.OneCycleLR(A_ , max_lr=0.0_1 , steps_per_epoch=2 , epochs=1 ) __UpperCAmelCase =DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __UpperCAmelCase =DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase__ ( A_: Optional[Any] ) -> str: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase__ ( A_: Union[str, Any] ) -> int: """simple docstring""" __UpperCAmelCase =torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(A_ ) class _A ( UpperCamelCase ): """simple docstring""" @require_cuda def _a ( self : List[Any] ) -> Any: __UpperCAmelCase =Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(__SCREAMING_SNAKE_CASE ): __UpperCAmelCase =Accelerator(cpu=__SCREAMING_SNAKE_CASE ) def _a ( self : Dict ) -> Union[str, Any]: __UpperCAmelCase =Accelerator() __UpperCAmelCase =GradientState() assert state.num_steps == 1 __UpperCAmelCase =4 assert state.num_steps == 4 assert state.sync_gradients is True __UpperCAmelCase =False assert state.sync_gradients is False GradientState._reset_state() def _a ( self : List[str] ) -> Tuple: __UpperCAmelCase =Accelerator() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) =accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def _a ( self : Any ) -> str: __UpperCAmelCase =Accelerator() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components() accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def _a ( self : str ) -> Tuple: PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any] ): pass with patch("""torch.cuda.set_device""" , __SCREAMING_SNAKE_CASE ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ): __UpperCAmelCase =Accelerator() self.assertEqual(str(accelerator.state.device ) , """cuda:64""" ) def _a ( self : Tuple ) -> str: __UpperCAmelCase =Accelerator() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components() accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =get_signature(__SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__SCREAMING_SNAKE_CASE ) # make sure random weights don't match load_random_weights(__SCREAMING_SNAKE_CASE ) self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) > 1e-3 ) # make sure loaded weights match accelerator.load_state(__SCREAMING_SNAKE_CASE ) self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) < 1e-3 ) def _a ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase =Accelerator() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components() accelerator.prepare(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =get_signature(__SCREAMING_SNAKE_CASE ) # saving hook def save_config(__SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] ): __UpperCAmelCase ={"""class_name""": models[0].__class__.__name__} with open(os.path.join(__SCREAMING_SNAKE_CASE , """data.json""" ) , """w""" ) as f: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # loading hook def load_config(__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ): with open(os.path.join(__SCREAMING_SNAKE_CASE , """data.json""" ) , """r""" ) as f: __UpperCAmelCase =json.load(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =config["""class_name"""] __UpperCAmelCase =accelerator.register_save_state_pre_hook(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =accelerator.register_load_state_pre_hook(__SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__SCREAMING_SNAKE_CASE ) # make sure random weights don't match with hooks load_random_weights(__SCREAMING_SNAKE_CASE ) self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) > 1e-3 ) # random class name to verify correct one is loaded __UpperCAmelCase ="""random""" # make sure loaded weights match with hooks accelerator.load_state(__SCREAMING_SNAKE_CASE ) self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) < 1e-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__SCREAMING_SNAKE_CASE ) # make sure random weights don't match with hooks removed load_random_weights(__SCREAMING_SNAKE_CASE ) self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) > 1e-3 ) # random class name to verify correct one is loaded __UpperCAmelCase ="""random""" # make sure loaded weights match with hooks removed accelerator.load_state(__SCREAMING_SNAKE_CASE ) self.assertTrue(abs(model_signature - get_signature(__SCREAMING_SNAKE_CASE ) ) < 1e-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def _a ( self : str ) -> List[Any]: __UpperCAmelCase =Accelerator() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components() __UpperCAmelCase =None # This should work __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertTrue(dummy_obj is None ) def _a ( self : Dict ) -> Any: __UpperCAmelCase =Accelerator() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =create_components() __UpperCAmelCase =[1, 2, 3] # This should work __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) self.assertEqual( getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , ) self.assertEqual( getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(__SCREAMING_SNAKE_CASE , """_is_accelerate_prepared""" , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , ) @slow @require_bnb def _a ( self : Union[str, Any] ) -> str: from transformers import AutoModelForCausalLM __UpperCAmelCase =AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , load_in_abit=__SCREAMING_SNAKE_CASE , device_map={"""""": 0} , ) __UpperCAmelCase =Accelerator() # This should work __UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE ) @slow @require_bnb def _a ( self : str ) -> str: from transformers import AutoModelForCausalLM __UpperCAmelCase =Accelerator() with init_empty_weights(): __UpperCAmelCase =AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , ) model.tie_weights() __UpperCAmelCase =infer_auto_device_map(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase ="""cpu""" __UpperCAmelCase =AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , device_map=__SCREAMING_SNAKE_CASE , load_in_abit=__SCREAMING_SNAKE_CASE , llm_inta_enable_fpaa_cpu_offload=__SCREAMING_SNAKE_CASE ) # This should not work and get value error with self.assertRaises(__SCREAMING_SNAKE_CASE ): __UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE ) @slow @require_bnb @require_multi_gpu def _a ( self : str ) -> Any: from transformers import AutoModelForCausalLM __UpperCAmelCase ={"""distributed_type""": DistributedType.MULTI_GPU} with init_empty_weights(): __UpperCAmelCase =AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , ) model.tie_weights() __UpperCAmelCase =infer_auto_device_map(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =1 __UpperCAmelCase =AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , load_in_abit=__SCREAMING_SNAKE_CASE , device_map=__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =Accelerator() # This should not work and get value error with self.assertRaises(__SCREAMING_SNAKE_CASE ): __UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def _a ( self : Optional[Any] ) -> str: from transformers import AutoModelForCausalLM with init_empty_weights(): __UpperCAmelCase =AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , ) __UpperCAmelCase =infer_auto_device_map(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =1 __UpperCAmelCase =AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , load_in_abit=__SCREAMING_SNAKE_CASE , device_map=__SCREAMING_SNAKE_CASE , ) __UpperCAmelCase =Accelerator() # This should work __UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE ) @require_cuda def _a ( self : str ) -> List[Any]: __UpperCAmelCase =torch.nn.Linear(10 , 10 ) __UpperCAmelCase =torch.optim.SGD(model.parameters() , lr=0.01 ) __UpperCAmelCase =Accelerator(cpu=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =accelerator.prepare(__SCREAMING_SNAKE_CASE )
68
import numpy as np def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
43
0
'''simple docstring''' import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[int] , a_ : str = "cpu" , a_ : str = "openai/clip-vit-large-patch14" ): """simple docstring""" __snake_case = device __snake_case = CLIPTokenizerFast.from_pretrained(a_ ) __snake_case = [0.48145466, 0.4578275, 0.40821073] __snake_case = [0.26862954, 0.26130258, 0.27577711] __snake_case = torchvision.transforms.Normalize(self.image_mean , self.image_std ) __snake_case = torchvision.transforms.Resize(224 ) __snake_case = torchvision.transforms.CenterCrop(224 ) def A ( self : Tuple , a_ : int ): """simple docstring""" __snake_case = self.resize(a_ ) __snake_case = self.center_crop(a_ ) __snake_case = self.normalize(a_ ) return images def __call__( self : List[str] , a_ : Any=None , a_ : Any=None , **a_ : Optional[int] ): """simple docstring""" __snake_case = self.tokenizer(text=a_ , **a_ ) __snake_case = self.preprocess_img(a_ ) __snake_case = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : List[str] , a_ : int=10 , a_ : str=0.01 , a_ : str=None , a_ : Optional[int]=None , a_ : Optional[Any]=None , a_ : Tuple=None , a_ : Tuple=None , a_ : List[str]=None , a_ : List[str]=False , a_ : List[str]=True , a_ : List[Any]="image" , a_ : str=True , a_ : List[str]=False , a_ : Any=False , a_ : Dict=False , ): """simple docstring""" super().__init__() __snake_case = None __snake_case = device if device else get_device() if vqgan: __snake_case = vqgan else: __snake_case = load_vqgan(self.device , conf_path=a_ , ckpt_path=a_ ) self.vqgan.eval() if clip: __snake_case = clip else: __snake_case = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" ) self.clip.to(self.device ) __snake_case = ProcessorGradientFlow(device=self.device ) __snake_case = iterations __snake_case = lr __snake_case = log __snake_case = make_grid __snake_case = return_val __snake_case = quantize __snake_case = self.vqgan.decoder.z_shape def A ( self : Union[str, Any] , a_ : Dict=None , a_ : Dict=None , a_ : Any=5 , a_ : int=True ): """simple docstring""" __snake_case = [] if output_path is None: __snake_case = "./animation.gif" if input_path is None: __snake_case = self.save_path __snake_case = sorted(glob(input_path + "/*" ) ) if not len(a_ ): raise ValueError( "No images found in save path, aborting (did you pass save_intermediate=True to the generate" " function?)" ) if len(a_ ) == 1: print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" ) __snake_case = total_duration / len(a_ ) __snake_case = [frame_duration] * len(a_ ) if extend_frames: __snake_case = 1.5 __snake_case = 3 for file_name in paths: if file_name.endswith(".png" ): images.append(imageio.imread(a_ ) ) imageio.mimsave(a_ , a_ , duration=a_ ) print(f'''gif saved to {output_path}''' ) def A ( self : Any , a_ : Union[str, Any]=None , a_ : Union[str, Any]=None ): """simple docstring""" if not (path or img): raise ValueError("Input either path or tensor" ) if img is not None: raise NotImplementedError __snake_case = preprocess(Image.open(a_ ) , target_image_size=256 ).to(self.device ) __snake_case = preprocess_vqgan(a_ ) __snake_case , *__snake_case = self.vqgan.encode(a_ ) return z def A ( self : List[str] , a_ : Dict ): """simple docstring""" __snake_case = self.latent.detach().requires_grad_() __snake_case = base_latent + transform_vector if self.quantize: __snake_case , *__snake_case = self.vqgan.quantize(a_ ) else: __snake_case = trans_latent return self.vqgan.decode(a_ ) def A ( self : str , a_ : Any , a_ : int , a_ : str=None ): """simple docstring""" __snake_case = self.clip_preprocessor(text=a_ , images=a_ , return_tensors="pt" , padding=a_ ) __snake_case = self.clip(**a_ ) __snake_case = clip_outputs.logits_per_image if weights is not None: __snake_case = similarity_logits * weights return similarity_logits.sum() def A ( self : int , a_ : List[str] , a_ : Union[str, Any] , a_ : Dict ): """simple docstring""" __snake_case = self._get_clip_similarity(pos_prompts["prompts"] , a_ , weights=(1 / pos_prompts["weights"]) ) if neg_prompts: __snake_case = self._get_clip_similarity(neg_prompts["prompts"] , a_ , weights=neg_prompts["weights"] ) else: __snake_case = torch.tensor([1] , device=self.device ) __snake_case = -torch.log(a_ ) + torch.log(a_ ) return loss def A ( self : str , a_ : List[str] , a_ : List[Any] , a_ : Union[str, Any] ): """simple docstring""" __snake_case = torch.randn_like(self.latent , requires_grad=a_ , device=self.device ) __snake_case = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() __snake_case = self._add_vector(a_ ) __snake_case = loop_post_process(a_ ) __snake_case = self._get_CLIP_loss(a_ , a_ , a_ ) print("CLIP loss" , a_ ) if self.log: wandb.log({"CLIP Loss": clip_loss} ) clip_loss.backward(retain_graph=a_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def A ( self : Union[str, Any] , a_ : Dict , a_ : int , a_ : Tuple ): """simple docstring""" wandb.init(reinit=a_ , project="face-editor" ) wandb.config.update({"Positive Prompts": positive_prompts} ) wandb.config.update({"Negative Prompts": negative_prompts} ) wandb.config.update({"lr": self.lr, "iterations": self.iterations} ) if image_path: __snake_case = Image.open(a_ ) __snake_case = image.resize((256, 256) ) wandb.log("Original Image" , wandb.Image(a_ ) ) def A ( self : Tuple , a_ : Dict ): """simple docstring""" if not prompts: return [] __snake_case = [] __snake_case = [] if isinstance(a_ , a_ ): __snake_case = [prompt.strip() for prompt in prompts.split("|" )] for prompt in prompts: if isinstance(a_ , (tuple, list) ): __snake_case = prompt[0] __snake_case = float(prompt[1] ) elif ":" in prompt: __snake_case , __snake_case = prompt.split(":" ) __snake_case = float(a_ ) else: __snake_case = prompt __snake_case = 1.0 processed_prompts.append(a_ ) weights.append(a_ ) return { "prompts": processed_prompts, "weights": torch.tensor(a_ , device=self.device ), } def A ( self : List[Any] , a_ : Optional[Any] , a_ : Dict=None , a_ : Optional[Any]=None , a_ : List[Any]=True , a_ : List[str]=False , a_ : Any=True , a_ : Union[str, Any]=True , a_ : str=None , ): """simple docstring""" if image_path: __snake_case = self._get_latent(a_ ) else: __snake_case = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(a_ , a_ , a_ ) assert pos_prompts, "You must provide at least one positive prompt." __snake_case = self.process_prompts(a_ ) __snake_case = self.process_prompts(a_ ) if save_final and save_path is None: __snake_case = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) ) if not os.path.exists(a_ ): os.makedirs(a_ ) else: __snake_case = save_path + "_" + get_timestamp() os.makedirs(a_ ) __snake_case = save_path __snake_case = self.vqgan.decode(self.latent )[0] if show_intermediate: print("Original Image" ) show_pil(custom_to_pil(a_ ) ) __snake_case = loop_post_process(a_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(a_ , a_ , a_ ) ): if show_intermediate: show_pil(a_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) ) if self.log: wandb.log({"Image": wandb.Image(a_ )} ) if show_final: show_pil(a_ ) if save_final: transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
69
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = '▁' lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} lowerCAmelCase = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } lowerCAmelCase = {'vinai/bartpho-syllable': 1024} class _a ( UpperCamelCase__ ): _lowercase : Tuple = VOCAB_FILES_NAMES _lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP _lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Any = ['''input_ids''', '''attention_mask'''] def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None: """simple docstring""" lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) lowercase__ = vocab_file lowercase__ = monolingual_vocab_file lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility lowercase__ = {} lowercase__ = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids: lowercase__ = cnt cnt += 1 with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): lowercase__ = line.strip().split()[0] lowercase__ = len(self.fairseq_tokens_to_ids ) if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids: lowercase__ = len(self.fairseq_tokens_to_ids ) lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self: Tuple ) -> int: """simple docstring""" lowercase__ = self.__dict__.copy() lowercase__ = None lowercase__ = self.sp_model.serialized_model_proto() return state def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]: """simple docstring""" lowercase__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__ = {} lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]: """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.fairseq_ids_to_tokens[index] def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict: """simple docstring""" lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: lowercase__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'{str(UpperCamelCase_ )} \n' ) return out_vocab_file, out_monolingual_vocab_file
43
0
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = BertTokenizer UpperCamelCase = BertTokenizerFast UpperCamelCase = True UpperCamelCase = True UpperCamelCase = filter_non_english def a__ ( self : List[Any] ) -> int: """simple docstring""" super().setUp() lowerCamelCase_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def a__ ( self : Tuple , A_ : List[str] ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = 'unwanted, running' return input_text, output_text def a__ ( self : Any ) -> Tuple: """simple docstring""" lowerCamelCase_ = self.tokenizer_class(self.vocab_file ) lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" if not self.test_rust_tokenizer: return lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = tokenizer.tokenize(A_ ) lowerCamelCase_ = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = tokenizer.encode(A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) # With lower casing lowerCamelCase_ = self.get_tokenizer(do_lower_case=A_ ) lowerCamelCase_ = self.get_rust_tokenizer(do_lower_case=A_ ) lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = tokenizer.tokenize(A_ ) lowerCamelCase_ = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = tokenizer.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ , add_special_tokens=A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = tokenizer.encode(A_ ) lowerCamelCase_ = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ , A_ ) def a__ ( self : Any ) -> Dict: """simple docstring""" lowerCamelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def a__ ( self : Dict ) -> int: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : str ) -> int: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def a__ ( self : Optional[int] ) -> Any: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : Tuple ) -> str: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : int ) -> List[Any]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : str ) -> List[str]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : Dict ) -> Any: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : int ) -> Any: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def a__ ( self : List[Any] ) -> int: """simple docstring""" lowerCamelCase_ = BasicTokenizer() lowerCamelCase_ = 'a\n\'ll !!to?\'d of, can\'t.' lowerCamelCase_ = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(A_ ) , A_ ) def a__ ( self : Optional[int] ) -> Dict: """simple docstring""" lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] lowerCamelCase_ = {} for i, token in enumerate(A_ ): lowerCamelCase_ = i lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def a__ ( self : List[Any] ) -> int: """simple docstring""" self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def a__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def a__ ( self : int ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def a__ ( self : Any ) -> int: """simple docstring""" lowerCamelCase_ = self.tokenizer_class.from_pretrained('bert-base-uncased' ) lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def a__ ( self : str ) -> str: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" lowerCamelCase_ = tokenizer_r.encode_plus( A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , ) lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False lowerCamelCase_ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = ['的', '人', '有'] lowerCamelCase_ = ''.join(A_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCamelCase_ = True lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ ) lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(A_ , A_ ) self.assertListEqual(A_ , A_ ) lowerCamelCase_ = False lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = self.tokenizer_class.from_pretrained(A_ , **A_ ) lowerCamelCase_ = tokenizer_r.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_p.encode(A_ , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(A_ ) lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(A_ ) # it is expected that only the first Chinese character is not preceded by "##". lowerCamelCase_ = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(A_ ) ] self.assertListEqual(A_ , A_ ) self.assertListEqual(A_ , A_ )
70
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = original_name.split('''.''' )[0] lowercase__ = key.split('''.''' ) lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] ) lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] ) lowercase__ = orig_block_num - offset lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' ) return key def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = OrderedDict() lowercase__ , lowercase__ = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): lowercase__ = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 lowercase__ = key[: key.find('''proj''' )] lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' ) lowercase__ = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: lowercase__ = '''poolformer.encoder.''' + key if "mlp.fc1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' ) if "norm2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: lowercase__ = key.replace('''head''' , '''classifier''' ) lowercase__ = value return new_state_dict def _a ( ): """simple docstring""" lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return image @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = PoolFormerConfig() # set attributes based on model_name lowercase__ = '''huggingface/label-files''' lowercase__ = model_name[-3:] lowercase__ = 10_00 lowercase__ = '''imagenet-1k-id2label.json''' lowercase__ = (1, 10_00) # set config attributes lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} if size == "s12": lowercase__ = [2, 2, 6, 2] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 0.9 elif size == "s24": lowercase__ = [4, 4, 12, 4] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 0.9 elif size == "s36": lowercase__ = [6, 6, 18, 6] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.9 elif size == "m36": lowercase__ = [6, 6, 18, 6] lowercase__ = [96, 1_92, 3_84, 7_68] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.95 elif size == "m48": lowercase__ = [8, 8, 24, 8] lowercase__ = [96, 1_92, 3_84, 7_68] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.95 else: raise ValueError(f'Size {size} not supported' ) # load image processor lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE ) # Prepare image lowercase__ = prepare_img() lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values logger.info(f'Converting model {model_name}...' ) # load original state dict lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) ) # rename keys lowercase__ = rename_keys(SCREAMING_SNAKE_CASE ) # create HuggingFace model and load state dict lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE ) model.load_state_dict(SCREAMING_SNAKE_CASE ) model.eval() # Define image processor lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE ) lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass lowercase__ = model(SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits # define expected logit slices for different models if size == "s12": lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(f'Size {size} not supported' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 ) # finally, save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) lowerCAmelCase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
43
0
'''simple docstring''' import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _snake_case (unittest.TestCase): @slow def UpperCamelCase__ ( self ): UpperCAmelCase_ : int = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" ) UpperCAmelCase_ : Tuple = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" ) model.to(_snake_case ) from datasets import load_dataset UpperCAmelCase_ : Tuple = load_dataset("nielsr/rvlcdip-demo" ) UpperCAmelCase_ : Dict = dataset["train"][0]["image"].convert("RGB" ) UpperCAmelCase_ : Optional[Any] = image_processor(_snake_case ,return_tensors="pt" ).to(_snake_case ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(**_snake_case ) UpperCAmelCase_ : Any = outputs.logits UpperCAmelCase_ : List[Any] = torch.Size((1, 16) ) self.assertEqual(logits.shape ,_snake_case ) UpperCAmelCase_ : Dict = torch.tensor( [-0.4158, -0.4092, -0.4347] ,device=_snake_case ,dtype=torch.float ,) self.assertTrue(torch.allclose(logits[0, :3] ,_snake_case ,atol=1E-4 ) )
71
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowerCAmelCase = logging.getLogger() def _a ( ): """simple docstring""" lowercase__ = argparse.ArgumentParser() parser.add_argument('''-f''' ) lowercase__ = parser.parse_args() return args.f def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = {} lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' ) if os.path.exists(SCREAMING_SNAKE_CASE ): with open(SCREAMING_SNAKE_CASE , '''r''' ) as f: lowercase__ = json.load(SCREAMING_SNAKE_CASE ) else: raise ValueError(f'can\'t find {path}' ) return results def _a ( ): """simple docstring""" lowercase__ = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _a ( UpperCamelCase__ ): @classmethod def lowerCamelCase_ ( cls: int ) -> Any: """simple docstring""" lowercase__ = tempfile.mkdtemp() lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict: """simple docstring""" shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Dict ) -> Optional[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = 7 if get_gpu_count() > 1 else 2 lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) ) @slow def lowerCamelCase_ ( self: Optional[int] ) -> Dict: """simple docstring""" lowercase__ = logging.StreamHandler(sys.stdout ) logger.addHandler(UpperCamelCase_ ) lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
43
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : List[Any] = logging.get_logger(__name__) _UpperCAmelCase : List[Any] = { '''microsoft/unispeech-large-1500h-cv''': ( '''https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json''' ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class __magic_name__ ( __SCREAMING_SNAKE_CASE ): UpperCamelCase__ = 'unispeech' def __init__( self , snake_case_=32 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_=30_72 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=1E-5 , snake_case_="group" , snake_case_="gelu" , snake_case_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , snake_case_=(5, 2, 2, 2, 2, 2, 2) , snake_case_=(10, 3, 3, 3, 3, 2, 2) , snake_case_=False , snake_case_=1_28 , snake_case_=16 , snake_case_=False , snake_case_=True , snake_case_=0.05 , snake_case_=10 , snake_case_=2 , snake_case_=0.0 , snake_case_=10 , snake_case_=0 , snake_case_=3_20 , snake_case_=2 , snake_case_=0.1 , snake_case_=1_00 , snake_case_=2_56 , snake_case_=2_56 , snake_case_=0.1 , snake_case_="mean" , snake_case_=False , snake_case_=False , snake_case_=2_56 , snake_case_=80 , snake_case_=0 , snake_case_=1 , snake_case_=2 , snake_case_=0.5 , **snake_case_ , ): super().__init__(**snake_case_ , pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ ) lowercase =hidden_size lowercase =feat_extract_norm lowercase =feat_extract_activation lowercase =list(snake_case_ ) lowercase =list(snake_case_ ) lowercase =list(snake_case_ ) lowercase =conv_bias lowercase =num_conv_pos_embeddings lowercase =num_conv_pos_embedding_groups lowercase =len(self.conv_dim ) lowercase =num_hidden_layers lowercase =intermediate_size lowercase =hidden_act lowercase =num_attention_heads lowercase =hidden_dropout lowercase =attention_dropout lowercase =activation_dropout lowercase =feat_proj_dropout lowercase =final_dropout lowercase =layerdrop lowercase =layer_norm_eps lowercase =initializer_range lowercase =num_ctc_classes lowercase =vocab_size lowercase =do_stable_layer_norm lowercase =use_weighted_layer_sum lowercase =classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase =apply_spec_augment lowercase =mask_time_prob lowercase =mask_time_length lowercase =mask_time_min_masks lowercase =mask_feature_prob lowercase =mask_feature_length lowercase =mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase =num_codevectors_per_group lowercase =num_codevector_groups lowercase =contrastive_logits_temperature lowercase =feat_quantizer_dropout lowercase =num_negatives lowercase =codevector_dim lowercase =proj_codevector_dim lowercase =diversity_loss_weight # ctc loss lowercase =ctc_loss_reduction lowercase =ctc_zero_infinity # pretraining loss lowercase =replace_prob @property def _A( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
72
from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = 'T5Config' class _a ( UpperCamelCase__ ): _lowercase : Optional[int] = '''mt5''' _lowercase : str = MTaConfig class _a ( UpperCamelCase__ ): _lowercase : Optional[Any] = '''mt5''' _lowercase : Optional[Any] = MTaConfig class _a ( UpperCamelCase__ ): _lowercase : Tuple = '''mt5''' _lowercase : Optional[Any] = MTaConfig
43
0
import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) a_ : Any = logging.getLogger() def lowerCamelCase__ (_UpperCAmelCase): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = os.path.join(_UpperCAmelCase , 'all_results.json') if os.path.exists(_UpperCAmelCase): with open(_UpperCAmelCase , 'r') as f: SCREAMING_SNAKE_CASE = json.load(_UpperCAmelCase) else: raise ValueError(F'''can\'t find {path}''') return results a_ : Union[str, Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class _snake_case ( A__ ): def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: import xla_spawn SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE = f''' ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split() with patch.object(a , 'argv' , a): SCREAMING_SNAKE_CASE = time() xla_spawn.main() SCREAMING_SNAKE_CASE = time() SCREAMING_SNAKE_CASE = get_results(a) self.assertGreaterEqual(result['eval_accuracy'] , 0.75) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. self.assertLess(end - start , 500) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: import xla_spawn SCREAMING_SNAKE_CASE = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split() with patch.object(a , 'argv' , a): xla_spawn.main()
73
from datetime import datetime import matplotlib.pyplot as plt import torch def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" for param in module.parameters(): lowercase__ = False def _a ( ): """simple docstring""" lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase__ = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE ) fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE ) fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE ) plt.show() def _a ( ): """simple docstring""" lowercase__ = datetime.now() lowercase__ = current_time.strftime('''%H:%M:%S''' ) return timestamp
43
0
import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg''' __SCREAMING_SNAKE_CASE : int = Image.open(requests.get(snake_case , stream=snake_case ).raw ).convert('''RGB''' ) __SCREAMING_SNAKE_CASE : Optional[int] = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ), ] ) __SCREAMING_SNAKE_CASE : Any = transform(snake_case ).unsqueeze(0 ).to(snake_case ) return image def a__ ( snake_case ): """simple docstring""" if "visual_encoder" in key: __SCREAMING_SNAKE_CASE : Dict = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , snake_case ) if "blocks" in key: __SCREAMING_SNAKE_CASE : Tuple = re.sub(R'''blocks''' , '''layers''' , snake_case ) if "attn" in key: __SCREAMING_SNAKE_CASE : int = re.sub(R'''attn''' , '''self_attn''' , snake_case ) if "norm1" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(R'''norm1''' , '''layer_norm1''' , snake_case ) if "norm2" in key: __SCREAMING_SNAKE_CASE : str = re.sub(R'''norm2''' , '''layer_norm2''' , snake_case ) if "encoder.norm" in key: __SCREAMING_SNAKE_CASE : str = re.sub(R'''encoder.norm''' , '''post_layernorm''' , snake_case ) if "encoder.patch_embed.proj" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , snake_case ) if "encoder.pos_embed" in key: __SCREAMING_SNAKE_CASE : Any = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , snake_case ) if "encoder.cls_token" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , snake_case ) if "self_attn" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , snake_case ) return key @torch.no_grad() def a__ ( snake_case , snake_case=None ): """simple docstring""" if config_path is not None: __SCREAMING_SNAKE_CASE : List[str] = BlipConfig.from_pretrained(snake_case ) else: __SCREAMING_SNAKE_CASE : Tuple = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) __SCREAMING_SNAKE_CASE : Optional[Any] = BlipForConditionalGeneration(snake_case ).eval() __SCREAMING_SNAKE_CASE : Tuple = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth''' __SCREAMING_SNAKE_CASE : Optional[Any] = blip_decoder(pretrained=snake_case , image_size=384 , vit='''base''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = pt_model.eval() __SCREAMING_SNAKE_CASE : Tuple = pt_model.state_dict() for key in modified_state_dict.copy(): __SCREAMING_SNAKE_CASE : List[str] = modified_state_dict.pop(snake_case ) __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = value hf_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : int = 384 __SCREAMING_SNAKE_CASE : List[Any] = load_demo_image(image_size=snake_case , device='''cpu''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(['''a picture of'''] ).input_ids __SCREAMING_SNAKE_CASE : Dict = hf_model.generate(snake_case , snake_case ) assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102] __SCREAMING_SNAKE_CASE : Optional[Any] = hf_model.generate(snake_case ) assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(snake_case ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' __SCREAMING_SNAKE_CASE : Dict = ( '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth''' ) __SCREAMING_SNAKE_CASE : Dict = blip_vqa(pretrained=snake_case , image_size=snake_case , vit='''base''' ) vqa_model.eval() __SCREAMING_SNAKE_CASE : Optional[int] = vqa_model.state_dict() for key in modified_state_dict.copy(): __SCREAMING_SNAKE_CASE : List[str] = modified_state_dict.pop(snake_case ) __SCREAMING_SNAKE_CASE : str = rename_key(snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = value __SCREAMING_SNAKE_CASE : str = BlipForQuestionAnswering(snake_case ) hf_vqa_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : Dict = ['''How many dogs are in this image?'''] __SCREAMING_SNAKE_CASE : Any = tokenizer(snake_case , return_tensors='''pt''' ).input_ids __SCREAMING_SNAKE_CASE : Tuple = hf_vqa_model.generate(snake_case , snake_case ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth''' __SCREAMING_SNAKE_CASE : str = blip_itm(pretrained=snake_case , image_size=snake_case , vit='''base''' ) itm_model.eval() __SCREAMING_SNAKE_CASE : List[str] = itm_model.state_dict() for key in modified_state_dict.copy(): __SCREAMING_SNAKE_CASE : List[str] = modified_state_dict.pop(snake_case ) __SCREAMING_SNAKE_CASE : Tuple = rename_key(snake_case ) __SCREAMING_SNAKE_CASE : Tuple = value __SCREAMING_SNAKE_CASE : int = BlipForImageTextRetrieval(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = ['''A picture of a woman with a dog sitting in a beach'''] __SCREAMING_SNAKE_CASE : Any = tokenizer( snake_case , return_tensors='''pt''' , padding='''max_length''' , truncation=snake_case , max_length=35 , ).input_ids hf_itm_model.load_state_dict(snake_case ) hf_itm_model.eval() __SCREAMING_SNAKE_CASE : List[Any] = hf_itm_model(snake_case , snake_case , use_itm_head=snake_case ) __SCREAMING_SNAKE_CASE : int = hf_itm_model(snake_case , snake_case , use_itm_head=snake_case ) assert out[0].item() == 0.2110_6874_9427_7954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowercase_ = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
74
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _a : def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = mask_ratio lowercase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase__ = (image_size // patch_size) ** 2 lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self: List[str] ) -> str: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = TFViTMAEModel(config=UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]: """simple docstring""" lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) # expected sequence length = num_patches lowercase__ = (self.image_size // self.patch_size) ** 2 lowercase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase__ = 1 lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ ) lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) lowercase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs lowercase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): _lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {} _lowercase : Optional[int] = False _lowercase : List[str] = False _lowercase : Optional[int] = False _lowercase : Optional[int] = False def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" lowercase__ = TFViTMAEModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" pass def lowerCamelCase_ ( self: List[Any] ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self: Optional[int] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase_ ( self: Tuple ) -> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Any: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = outputs_dict[0].numpy() lowercase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(UpperCamelCase_: List[Any] ): lowercase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(UpperCamelCase_ ): lowercase__ = v.numpy() else: lowercase__ = np.array(UpperCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = prepare_numpy_arrays(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str: """simple docstring""" np.random.seed(2 ) lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ = tf.constant(UpperCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase__ = tf_noise super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> Dict: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(UpperCamelCase_ ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ ) } lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ = tf.convert_to_tensor(UpperCamelCase_ ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: lowercase__ = main_layer_class(UpperCamelCase_ ) lowercase__ = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) ) lowercase__ = model(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' ) model.save(UpperCamelCase_ ) lowercase__ = tf.keras.models.load_model( UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(UpperCamelCase_ , tf.keras.Model ) lowercase__ = model(UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": lowercase__ = outputs.last_hidden_state.numpy() lowercase__ = 0 else: lowercase__ = outputs.logits.numpy() lowercase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ ) lowercase__ = model_class.from_pretrained(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": lowercase__ = after_outputs['''last_hidden_state'''].numpy() lowercase__ = 0 else: lowercase__ = after_outputs['''logits'''].numpy() lowercase__ = 0 lowercase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase_ , 1E-5 ) def lowerCamelCase_ ( self: Tuple ) -> List[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(UpperCamelCase_ ) lowercase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowercase__ = model_class.from_config(model.config ) lowercase__ = new_model(UpperCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def lowerCamelCase_ ( self: Optional[int] ) -> str: """simple docstring""" pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def lowerCamelCase_ ( self: Any ) -> Dict: """simple docstring""" pass @slow def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(UpperCamelCase_ ) def _a ( ): """simple docstring""" lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _a ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Tuple ) -> Tuple: """simple docstring""" return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def lowerCamelCase_ ( self: int ) -> Optional[int]: """simple docstring""" np.random.seed(2 ) lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase__ = ViTMAEConfig() lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) # verify the logits lowercase__ = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowercase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
43
0
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase__ = { '''configuration_efficientnet''': [ '''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EfficientNetConfig''', '''EfficientNetOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = ['''EfficientNetImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ '''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EfficientNetForImageClassification''', '''EfficientNetModel''', '''EfficientNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
75
def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if (len(SCREAMING_SNAKE_CASE ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures a_ = logging.get_logger(__name__) @dataclass class UpperCAmelCase_ : UpperCamelCase =field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} ) UpperCamelCase =field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCamelCase =field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCamelCase =field( default=snake_case , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : List[str] = self.task_name.lower() class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="train" UpperCamelCase ="dev" UpperCamelCase ="test" class UpperCAmelCase_ ( snake_case ): UpperCamelCase =42 UpperCamelCase =42 UpperCamelCase =42 def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = Split.train , UpperCamelCase_ = None , ) -> List[str]: warnings.warn( '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , UpperCamelCase_ , ) __lowercase : Optional[int] = args __lowercase : Optional[int] = glue_processors[args.task_name]() __lowercase : List[str] = glue_output_modes[args.task_name] if isinstance(UpperCamelCase_ , UpperCamelCase_ ): try: __lowercase : Union[str, Any] = Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) # Load data features from cache or dataset file __lowercase : Union[str, Any] = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , ) __lowercase : Optional[int] = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) __lowercase ,__lowercase : str = label_list[2], label_list[1] __lowercase : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __lowercase : List[Any] = cached_features_file + '''.lock''' with FileLock(UpperCamelCase_ ): if os.path.exists(UpperCamelCase_ ) and not args.overwrite_cache: __lowercase : List[Any] = time.time() __lowercase : Dict = torch.load(UpperCamelCase_ ) logger.info( F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) else: logger.info(F"""Creating features from dataset file at {args.data_dir}""" ) if mode == Split.dev: __lowercase : Dict = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: __lowercase : Tuple = self.processor.get_test_examples(args.data_dir ) else: __lowercase : int = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: __lowercase : str = examples[:limit_length] __lowercase : Optional[int] = glue_convert_examples_to_features( UpperCamelCase_ , UpperCamelCase_ , max_length=args.max_seq_length , label_list=UpperCamelCase_ , output_mode=self.output_mode , ) __lowercase : List[Any] = time.time() torch.save(self.features , UpperCamelCase_ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self ) -> str: return len(self.features ) def __getitem__( self , UpperCamelCase_ ) -> InputFeatures: return self.features[i] def _lowerCamelCase ( self ) -> Optional[int]: return self.label_list
76
from __future__ import annotations def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ , lowercase__ = position lowercase__ = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] lowercase__ = [] for position in positions: lowercase__ , lowercase__ = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(SCREAMING_SNAKE_CASE ) return permissible_positions def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return not any(elem == 0 for row in board for elem in row ) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if is_complete(SCREAMING_SNAKE_CASE ): return True for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ): lowercase__ , lowercase__ = position if board[y][x] == 0: lowercase__ = curr + 1 if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ): return True lowercase__ = 0 return False def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )] for i in range(SCREAMING_SNAKE_CASE ): for j in range(SCREAMING_SNAKE_CASE ): lowercase__ = 1 if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ): return board lowercase__ = 0 lowercase__ = f'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
"""simple docstring""" import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel A = HfApi() A = {} # fmt: off A = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) A = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) A = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) A = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) A = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) A = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) A = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) A = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) A = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) A = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) A = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) A = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) A = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) A = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) A = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on A = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": A = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(f'''Started running {mod.modelId}!!!''') if mod.modelId.startswith("""CompVis"""): A = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""") else: A = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) A = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) A = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): A = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3 ) print(f'''{mod.modelId} has passed successfully!!!''')
77
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n' @dataclass class _a ( UpperCamelCase__ ): _lowercase : Union[PIL.Image.Image, np.ndarray] class _a ( UpperCamelCase__ ): def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]: """simple docstring""" super().__init__() self.register_modules( prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]: """simple docstring""" if latents is None: lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' ) lowercase__ = latents.to(UpperCamelCase_ ) lowercase__ = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) lowercase__ = torch.device(f'cuda:{gpu_id}' ) lowercase__ = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase_ , UpperCamelCase_ ) @property def lowerCamelCase_ ( self: List[Any] ) -> Dict: """simple docstring""" if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(UpperCamelCase_ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any: """simple docstring""" if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ): lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 ) if not isinstance(UpperCamelCase_ , torch.Tensor ): lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ ) lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state'''] lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) if do_classifier_free_guidance: lowercase__ = torch.zeros_like(UpperCamelCase_ ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase__ = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(UpperCamelCase_ ) def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]: """simple docstring""" if isinstance(UpperCamelCase_ , PIL.Image.Image ): lowercase__ = 1 elif isinstance(UpperCamelCase_ , torch.Tensor ): lowercase__ = image.shape[0] elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): lowercase__ = len(UpperCamelCase_ ) else: raise ValueError( f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' ) lowercase__ = self._execution_device lowercase__ = batch_size * num_images_per_prompt lowercase__ = guidance_scale > 1.0 lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # prior self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ ) lowercase__ = self.scheduler.timesteps lowercase__ = self.prior.config.num_embeddings lowercase__ = self.prior.config.embedding_dim lowercase__ = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the latents if we are doing classifier free guidance lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self.prior( UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding # remove the variance lowercase__ , lowercase__ = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: lowercase__ , lowercase__ = noise_pred.chunk(2 ) lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) lowercase__ = self.scheduler.step( UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=UpperCamelCase_ ) lowercase__ = [] for i, latent in enumerate(UpperCamelCase_ ): print() lowercase__ = self.renderer.decode( latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(UpperCamelCase_ ) lowercase__ = torch.stack(UpperCamelCase_ ) if output_type not in ["np", "pil"]: raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' ) lowercase__ = images.cpu().numpy() if output_type == "pil": lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=UpperCamelCase_ )
43
0
'''simple docstring''' from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class __A ( UpperCamelCase__ ): def __init__(self : int , __a : Optional[NestedDataStructureLike[PathLike]] = None , __a : Optional[NamedSplit] = None , __a : Optional[Features] = None , __a : str = None , __a : bool = False , __a : bool = False , __a : Optional[int] = None , **__a : Optional[Any] , ): UpperCAmelCase_ = path_or_paths UpperCAmelCase_ = split if split or isinstance(__a , __a ) else "train" UpperCAmelCase_ = features UpperCAmelCase_ = cache_dir UpperCAmelCase_ = keep_in_memory UpperCAmelCase_ = streaming UpperCAmelCase_ = num_proc UpperCAmelCase_ = kwargs @abstractmethod def _lowercase (self : Tuple ): pass class __A ( UpperCamelCase__ ): def __init__(self : Union[str, Any] , __a : Optional[Features] = None , __a : str = None , __a : bool = False , __a : bool = False , __a : Optional[int] = None , **__a : Any , ): UpperCAmelCase_ = features UpperCAmelCase_ = cache_dir UpperCAmelCase_ = keep_in_memory UpperCAmelCase_ = streaming UpperCAmelCase_ = num_proc UpperCAmelCase_ = kwargs @abstractmethod def _lowercase (self : List[str] ): pass
78
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): def lowerCamelCase_ ( self: Tuple ) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ ) }
43
0
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase="shi-labs/oneformer_demo" ) -> str: '''simple docstring''' with open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) as f: UpperCAmelCase__ : int = json.load(__lowerCamelCase ) UpperCAmelCase__ : Dict = {} UpperCAmelCase__ : Dict = [] UpperCAmelCase__ : int = [] for key, info in class_info.items(): UpperCAmelCase__ : int = info["""name"""] class_names.append(info["""name"""] ) if info["isthing"]: thing_ids.append(int(__lowerCamelCase ) ) UpperCAmelCase__ : List[str] = thing_ids UpperCAmelCase__ : Any = class_names return metadata class UpperCAmelCase_ ( unittest.TestCase ): def __init__( self , _lowerCAmelCase , _lowerCAmelCase=7 , _lowerCAmelCase=3 , _lowerCAmelCase=30 , _lowerCAmelCase=400 , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=[0.5, 0.5, 0.5] , _lowerCAmelCase=10 , _lowerCAmelCase=False , _lowerCAmelCase=255 , _lowerCAmelCase="shi-labs/oneformer_demo" , _lowerCAmelCase="ade20k_panoptic.json" , _lowerCAmelCase=10 , ): UpperCAmelCase__ : Any = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : str = min_resolution UpperCAmelCase__ : Dict = max_resolution UpperCAmelCase__ : List[str] = do_resize UpperCAmelCase__ : Union[str, Any] = {"""shortest_edge""": 32, """longest_edge""": 1333} if size is None else size UpperCAmelCase__ : str = do_normalize UpperCAmelCase__ : Optional[Any] = image_mean UpperCAmelCase__ : Any = image_std UpperCAmelCase__ : str = class_info_file UpperCAmelCase__ : Tuple = prepare_metadata(_lowerCAmelCase , _lowerCAmelCase ) UpperCAmelCase__ : Dict = num_text UpperCAmelCase__ : Dict = repo_path # for the post_process_functions UpperCAmelCase__ : Union[str, Any] = 2 UpperCAmelCase__ : int = 10 UpperCAmelCase__ : int = 10 UpperCAmelCase__ : int = 3 UpperCAmelCase__ : int = 4 UpperCAmelCase__ : Any = num_labels UpperCAmelCase__ : List[Any] = do_reduce_labels UpperCAmelCase__ : int = ignore_index def __UpperCAmelCase ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=False ): if not batched: UpperCAmelCase__ : Any = image_inputs[0] if isinstance(_lowerCAmelCase , Image.Image ): UpperCAmelCase__ , UpperCAmelCase__ : Tuple = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : Any = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Dict = int(self.size["""shortest_edge"""] * h / w ) UpperCAmelCase__ : Tuple = self.size["""shortest_edge"""] elif w > h: UpperCAmelCase__ : Dict = self.size["""shortest_edge"""] UpperCAmelCase__ : Optional[int] = int(self.size["""shortest_edge"""] * w / h ) else: UpperCAmelCase__ : List[str] = self.size["""shortest_edge"""] UpperCAmelCase__ : Optional[Any] = self.size["""shortest_edge"""] else: UpperCAmelCase__ : int = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase__ : Optional[Any] = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[0] )[0] UpperCAmelCase__ : Dict = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[1] )[1] return expected_height, expected_width def __UpperCAmelCase ( self ): return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ): __lowerCamelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __lowerCamelCase = image_processing_class def __UpperCAmelCase ( self ): UpperCAmelCase__ : Optional[int] = OneFormerImageProcessorTester(self ) @property def __UpperCAmelCase ( self ): return self.image_processing_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ): UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , """image_mean""" ) ) self.assertTrue(hasattr(_lowerCAmelCase , """image_std""" ) ) self.assertTrue(hasattr(_lowerCAmelCase , """do_normalize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase , """do_resize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase , """size""" ) ) self.assertTrue(hasattr(_lowerCAmelCase , """ignore_index""" ) ) self.assertTrue(hasattr(_lowerCAmelCase , """class_info_file""" ) ) self.assertTrue(hasattr(_lowerCAmelCase , """num_text""" ) ) self.assertTrue(hasattr(_lowerCAmelCase , """repo_path""" ) ) self.assertTrue(hasattr(_lowerCAmelCase , """metadata""" ) ) self.assertTrue(hasattr(_lowerCAmelCase , """do_reduce_labels""" ) ) def __UpperCAmelCase ( self ): pass def __UpperCAmelCase ( self ): # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , Image.Image ) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase ) UpperCAmelCase__ : str = image_processor( _lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self ): # Initialize image_processor UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , np.ndarray ) # Test not batched input UpperCAmelCase__ : Optional[Any] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processing_tester.get_expected_values(_lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase ) UpperCAmelCase__ : Optional[Any] = image_processor( _lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self ): # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , torch.Tensor ) # Test not batched input UpperCAmelCase__ : Optional[Any] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.image_processing_tester.get_expected_values(_lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processing_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase ) UpperCAmelCase__ : Union[str, Any] = image_processor( _lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCAmelCase ( self , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase="np" ): UpperCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # prepare image and target UpperCAmelCase__ : Union[str, Any] = self.image_processing_tester.num_labels UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_lowerCAmelCase ) if with_segmentation_maps: UpperCAmelCase__ : int = num_labels if is_instance_map: UpperCAmelCase__ : Optional[int] = list(range(_lowerCAmelCase ) ) * 2 UpperCAmelCase__ : Dict = dict(enumerate(_lowerCAmelCase ) ) UpperCAmelCase__ : int = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": UpperCAmelCase__ : List[str] = [Image.fromarray(_lowerCAmelCase ) for annotation in annotations] UpperCAmelCase__ : List[Any] = image_processor( _lowerCAmelCase , ["""semantic"""] * len(_lowerCAmelCase ) , _lowerCAmelCase , return_tensors="""pt""" , instance_id_to_semantic_id=_lowerCAmelCase , pad_and_return_pixel_mask=_lowerCAmelCase , ) return inputs def __UpperCAmelCase ( self ): pass def __UpperCAmelCase ( self ): def common(_lowerCAmelCase=False , _lowerCAmelCase=None ): UpperCAmelCase__ : List[str] = self.comm_get_image_processor_inputs( with_segmentation_maps=_lowerCAmelCase , is_instance_map=_lowerCAmelCase , segmentation_type=_lowerCAmelCase ) UpperCAmelCase__ : Any = inputs["""mask_labels"""] UpperCAmelCase__ : Optional[Any] = inputs["""class_labels"""] UpperCAmelCase__ : Any = inputs["""pixel_values"""] UpperCAmelCase__ : Any = inputs["""text_inputs"""] # check the batch_size for mask_label, class_label, text_input in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(_lowerCAmelCase ) , self.image_processing_tester.num_text ) common() common(is_instance_map=_lowerCAmelCase ) common(is_instance_map=_lowerCAmelCase , segmentation_type="""pil""" ) common(is_instance_map=_lowerCAmelCase , segmentation_type="""pil""" ) def __UpperCAmelCase ( self ): UpperCAmelCase__ : int = np.zeros((20, 50) ) UpperCAmelCase__ : List[Any] = 1 UpperCAmelCase__ : List[Any] = 1 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : str = binary_mask_to_rle(_lowerCAmelCase ) self.assertEqual(len(_lowerCAmelCase ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def __UpperCAmelCase ( self ): UpperCAmelCase__ : Any = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) UpperCAmelCase__ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase__ : str = fature_extractor.post_process_semantic_segmentation(_lowerCAmelCase ) self.assertEqual(len(_lowerCAmelCase ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) UpperCAmelCase__ : Tuple = [(1, 4) for i in range(self.image_processing_tester.batch_size )] UpperCAmelCase__ : Union[str, Any] = fature_extractor.post_process_semantic_segmentation(_lowerCAmelCase , target_sizes=_lowerCAmelCase ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def __UpperCAmelCase ( self ): UpperCAmelCase__ : Any = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) UpperCAmelCase__ : str = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase__ : Optional[int] = image_processor.post_process_instance_segmentation(_lowerCAmelCase , threshold=0 ) self.assertTrue(len(_lowerCAmelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("""segmentation""" in el ) self.assertTrue("""segments_info""" in el ) self.assertEqual(type(el["""segments_info"""] ) , _lowerCAmelCase ) self.assertEqual( el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def __UpperCAmelCase ( self ): UpperCAmelCase__ : int = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) UpperCAmelCase__ : List[Any] = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase__ : List[Any] = image_processor.post_process_panoptic_segmentation(_lowerCAmelCase , threshold=0 ) self.assertTrue(len(_lowerCAmelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("""segmentation""" in el ) self.assertTrue("""segments_info""" in el ) self.assertEqual(type(el["""segments_info"""] ) , _lowerCAmelCase ) self.assertEqual( el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
79
import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[Any] = DownBlockaD # noqa F405 _lowercase : Dict = '''down''' def lowerCamelCase_ ( self: List[str] ) -> Tuple: """simple docstring""" lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405 _lowercase : Tuple = '''down''' def lowerCamelCase_ ( self: List[Any] ) -> str: """simple docstring""" lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = AttnDownBlockaD # noqa F405 _lowercase : List[Any] = '''down''' def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = CrossAttnDownBlockaD # noqa F405 _lowercase : Optional[int] = '''down''' def lowerCamelCase_ ( self: Optional[Any] ) -> Any: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: str ) -> Tuple: """simple docstring""" lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405 _lowercase : str = '''down''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]: """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = SkipDownBlockaD # noqa F405 _lowercase : Tuple = '''down''' @property def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" return super().get_dummy_input(include_skip_sample=UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> List[Any]: """simple docstring""" lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405 _lowercase : Optional[int] = '''down''' @property def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" return super().get_dummy_input(include_skip_sample=UpperCamelCase_ ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : int = DownEncoderBlockaD # noqa F405 _lowercase : List[Any] = '''down''' @property def lowerCamelCase_ ( self: List[str] ) -> str: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: Any ) -> List[Any]: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''out_channels''': 32, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: str ) -> Dict: """simple docstring""" lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405 _lowercase : int = '''down''' @property def lowerCamelCase_ ( self: Dict ) -> Optional[Any]: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: str ) -> List[str]: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''out_channels''': 32, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405 _lowercase : Union[str, Any] = '''mid''' def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''temb_channels''': 128, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405 _lowercase : str = '''mid''' def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405 _lowercase : str = '''mid''' @property def lowerCamelCase_ ( self: int ) -> List[Any]: """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = UpBlockaD # noqa F405 _lowercase : Any = '''up''' @property def lowerCamelCase_ ( self: str ) -> str: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> List[Any]: """simple docstring""" lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405 _lowercase : List[Any] = '''up''' @property def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]: """simple docstring""" lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = CrossAttnUpBlockaD # noqa F405 _lowercase : List[str] = '''up''' @property def lowerCamelCase_ ( self: int ) -> Any: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Dict ) -> Optional[int]: """simple docstring""" lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405 _lowercase : Dict = '''up''' @property def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnUpBlockaD # noqa F405 _lowercase : Optional[Any] = '''up''' @property def lowerCamelCase_ ( self: Tuple ) -> int: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Dict = SkipUpBlockaD # noqa F405 _lowercase : Optional[int] = '''up''' @property def lowerCamelCase_ ( self: Dict ) -> int: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] ) -> Dict: """simple docstring""" lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnSkipUpBlockaD # noqa F405 _lowercase : str = '''up''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> Dict: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Dict = UpDecoderBlockaD # noqa F405 _lowercase : Tuple = '''up''' @property def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ = {'''in_channels''': 32, '''out_channels''': 32} lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405 _lowercase : str = '''up''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = {'''in_channels''': 32, '''out_channels''': 32} lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: int ) -> Optional[Any]: """simple docstring""" lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] super().test_output(UpperCamelCase_ )
43
0
import re def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = re.compile( r"""^(?:0|94|\+94|0{2}94)""" r"""7(0|1|2|4|5|6|7|8)""" r"""(-| |)""" r"""\d{7}$""" ) return bool(re.search(lowerCamelCase , lowerCamelCase ) ) if __name__ == "__main__": __UpperCamelCase : Dict = """0094702343221""" print(is_sri_lankan_phone_number(phone))
80
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowercase__ = set() # Replace all the whitespace in our sentence lowercase__ = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(SCREAMING_SNAKE_CASE ) == 26 def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowercase__ = [False] * 26 for char in input_str: if char.islower(): lowercase__ = True elif char.isupper(): lowercase__ = True return all(SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def _a ( ): """simple docstring""" from timeit import timeit lowercase__ = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE ) ) print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE ) ) print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
43
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging _snake_case : Union[str, Any] = logging.get_logger(__name__) _snake_case : Union[str, Any] = { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json", # See all Marian models at https://huggingface.co/models?filter=marian } class a (_lowerCAmelCase ): """simple docstring""" __UpperCAmelCase : Any = "marian" __UpperCAmelCase : str = ["past_key_values"] __UpperCAmelCase : int = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : List[Any] , lowerCamelCase : Tuple=58101 , lowerCamelCase : str=None , lowerCamelCase : Dict=1024 , lowerCamelCase : Tuple=12 , lowerCamelCase : List[str]=4096 , lowerCamelCase : Any=16 , lowerCamelCase : Tuple=12 , lowerCamelCase : Dict=4096 , lowerCamelCase : Union[str, Any]=16 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Optional[Any]=0.0 , lowerCamelCase : Tuple=True , lowerCamelCase : str=True , lowerCamelCase : int="gelu" , lowerCamelCase : Any=1024 , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : List[Any]=0.0 , lowerCamelCase : int=0.0 , lowerCamelCase : Optional[int]=0.02 , lowerCamelCase : Optional[Any]=58100 , lowerCamelCase : str=False , lowerCamelCase : Dict=58100 , lowerCamelCase : Union[str, Any]=0 , lowerCamelCase : Tuple=0 , lowerCamelCase : List[str]=True , **lowerCamelCase : int , ) -> Optional[Any]: __snake_case : Dict = vocab_size __snake_case : List[str] = decoder_vocab_size or vocab_size __snake_case : int = max_position_embeddings __snake_case : int = d_model __snake_case : Optional[Any] = encoder_ffn_dim __snake_case : str = encoder_layers __snake_case : Optional[int] = encoder_attention_heads __snake_case : Any = decoder_ffn_dim __snake_case : str = decoder_layers __snake_case : Dict = decoder_attention_heads __snake_case : str = dropout __snake_case : Any = attention_dropout __snake_case : int = activation_dropout __snake_case : Optional[Any] = activation_function __snake_case : str = init_std __snake_case : List[str] = encoder_layerdrop __snake_case : str = decoder_layerdrop __snake_case : Optional[Any] = use_cache __snake_case : Tuple = encoder_layers __snake_case : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True __snake_case : Dict = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , ) class a (_lowerCAmelCase ): """simple docstring""" @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def __snake_case ( self : str ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: __snake_case : Optional[Any] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: __snake_case : Optional[int] = {0: "batch"} __snake_case : Any = {0: "batch", 1: "past_decoder_sequence + sequence"} else: __snake_case : Union[str, Any] = {0: "batch", 1: "decoder_sequence"} __snake_case : List[Any] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. __snake_case : Dict = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: __snake_case , __snake_case : Dict = self.num_layers for i in range(lowerCamelCase ): __snake_case : List[str] = {0: "batch", 2: "past_sequence + sequence"} __snake_case : Any = {0: "batch", 2: "past_sequence + sequence"} else: __snake_case : Dict = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def __snake_case ( self : Dict ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: __snake_case : Dict = super().outputs else: __snake_case : Optional[int] = super(lowerCamelCase , self ).outputs if self.use_past: __snake_case , __snake_case : Dict = self.num_layers for i in range(lowerCamelCase ): __snake_case : Dict = {0: "batch", 2: "past_sequence + sequence"} __snake_case : List[str] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __snake_case ( self : Any , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: __snake_case : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) # Generate decoder inputs __snake_case : Any = seq_length if not self.use_past else 1 __snake_case : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) __snake_case : Optional[int] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} __snake_case : int = dict(**lowerCamelCase , **lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch __snake_case , __snake_case : Any = common_inputs["input_ids"].shape __snake_case : Tuple = common_inputs["decoder_input_ids"].shape[1] __snake_case , __snake_case : Optional[Any] = self.num_attention_heads __snake_case : List[Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __snake_case : int = decoder_seq_length + 3 __snake_case : List[str] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __snake_case : Optional[int] = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 ) __snake_case : Optional[Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __snake_case , __snake_case : str = self.num_layers __snake_case : Any = min(lowerCamelCase , lowerCamelCase ) __snake_case : Optional[Any] = max(lowerCamelCase , lowerCamelCase ) - min_num_layers __snake_case : List[str] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(lowerCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase ), ) ) # TODO: test this. __snake_case : List[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(lowerCamelCase , lowerCamelCase ): common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) ) return common_inputs def __snake_case ( self : int , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: __snake_case : Any = self._generate_dummy_inputs_for_encoder_and_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch __snake_case , __snake_case : int = common_inputs["input_ids"].shape # Not using the same length for past_key_values __snake_case : List[Any] = seqlen + 2 __snake_case , __snake_case : List[Any] = self.num_layers __snake_case , __snake_case : Union[str, Any] = self.num_attention_heads __snake_case : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __snake_case : Any = common_inputs["attention_mask"].dtype __snake_case : Optional[int] = torch.cat( [common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 ) __snake_case : Tuple = [ (torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase ) ] return common_inputs def __snake_case ( self : Optional[int] , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __snake_case : Optional[int] = compute_effective_axis_dimension( lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __snake_case : List[Any] = tokenizer.num_special_tokens_to_add(lowerCamelCase ) __snake_case : Optional[int] = compute_effective_axis_dimension( lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase ) # Generate dummy inputs according to compute batch and sequence __snake_case : List[Any] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size __snake_case : List[str] = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) ) return common_inputs def __snake_case ( self : List[str] , lowerCamelCase : PreTrainedTokenizer , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: __snake_case : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase ) else: __snake_case : str = self._generate_dummy_inputs_for_causal_lm( lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase ) return common_inputs def __snake_case ( self : str , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : str ) -> List[Any]: if self.task in ["default", "seq2seq-lm"]: __snake_case : List[Any] = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) else: __snake_case : Tuple = super(lowerCamelCase , self )._flatten_past_key_values_( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) @property def __snake_case ( self : List[str] ) -> float: return 1E-4
81
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length, 2) , SCREAMING_SNAKE_CASE ) else: lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length) , SCREAMING_SNAKE_CASE ) for i, tensor in enumerate(SCREAMING_SNAKE_CASE ): if padding_side == "right": if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = tensor[:sequence_length] else: lowercase__ = tensor[:sequence_length] else: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = tensor[:sequence_length] else: lowercase__ = tensor[:sequence_length] return out_tensor.tolist() def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = ord(SCREAMING_SNAKE_CASE ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True lowercase__ = unicodedata.category(SCREAMING_SNAKE_CASE ) if cat.startswith('''P''' ): return True return False @dataclass class _a ( UpperCamelCase__ ): _lowercase : PreTrainedTokenizerBase _lowercase : Union[bool, str, PaddingStrategy] = True _lowercase : Optional[int] = None _lowercase : Optional[int] = None _lowercase : int = -100 _lowercase : str = "pt" def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ) -> List[Any]: """simple docstring""" import torch lowercase__ = '''label''' if '''label''' in features[0].keys() else '''labels''' lowercase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None lowercase__ = self.tokenizer.pad( UpperCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch lowercase__ = torch.tensor(batch['''entity_ids'''] ).shape[1] lowercase__ = self.tokenizer.padding_side if padding_side == "right": lowercase__ = [ list(UpperCamelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) for label in labels ] else: lowercase__ = [ [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) + list(UpperCamelCase_ ) for label in labels ] lowercase__ = [feature['''ner_tags'''] for feature in features] lowercase__ = padding_tensor(UpperCamelCase_ , -1 , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = [feature['''original_entity_spans'''] for feature in features] lowercase__ = padding_tensor(UpperCamelCase_ , (-1, -1) , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = {k: torch.tensor(UpperCamelCase_ , dtype=torch.intaa ) for k, v in batch.items()} return batch
43
0
"""simple docstring""" def a__ ( lowerCAmelCase__ ): if len(lowerCAmelCase__ ) <= 1: return lst UpperCAmelCase_ = 1 while i < len(lowerCAmelCase__ ): if lst[i - 1] <= lst[i]: i += 1 else: UpperCAmelCase_ , UpperCAmelCase_ = lst[i], lst[i - 1] i -= 1 if i == 0: UpperCAmelCase_ = 1 return lst if __name__ == "__main__": lowerCamelCase = input("""Enter numbers separated by a comma:\n""").strip() lowerCamelCase = [int(item) for item in user_input.split(""",""")] print(gnome_sort(unsorted))
82
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _a ( UpperCamelCase__ ): def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]: """simple docstring""" super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) lowercase__ = eval_examples lowercase__ = post_process_function def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]: """simple docstring""" lowercase__ = gen_kwargs.copy() lowercase__ = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length ) lowercase__ = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams ) lowercase__ = gen_kwargs lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset lowercase__ = self.get_eval_dataloader(UpperCamelCase_ ) lowercase__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowercase__ = self.compute_metrics lowercase__ = None lowercase__ = time.time() lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowercase__ = eval_loop( UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , ) finally: lowercase__ = compute_metrics lowercase__ = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): lowercase__ = metrics.pop(UpperCamelCase_ ) metrics.update(output.metrics ) else: lowercase__ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCamelCase_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ ) return metrics def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = gen_kwargs.copy() lowercase__ = self.get_test_dataloader(UpperCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. lowercase__ = self.compute_metrics lowercase__ = None lowercase__ = time.time() lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowercase__ = eval_loop( UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , ) finally: lowercase__ = compute_metrics lowercase__ = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' ) lowercase__ = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): lowercase__ = metrics.pop(UpperCamelCase_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
43
0
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def snake_case_ ( A_ : List[str] ): '''simple docstring''' _lowerCamelCase : Any = filter(lambda A_ : p.requires_grad, model.parameters() ) _lowerCamelCase : Union[str, Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCAmelCase__ = logging.getLogger(__name__) def snake_case_ ( A_ : Optional[Any], A_ : Any ): '''simple docstring''' if metric == "rouge2": _lowerCamelCase : Union[str, Any] = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": _lowerCamelCase : Dict = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": _lowerCamelCase : Optional[int] = '''{val_avg_em:.4f}-{step_count}''' elif metric == "loss": _lowerCamelCase : List[str] = '''{val_avg_loss:.4f}-{step_count}''' else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' ''' function.''' ) _lowerCamelCase : str = ModelCheckpoint( dirpath=A_, filename=A_, monitor=F'''val_{metric}''', mode='''max''', save_top_k=1, every_n_epochs=1, ) return checkpoint_callback def snake_case_ ( A_ : Any, A_ : int ): '''simple docstring''' return EarlyStopping( monitor=F'''val_{metric}''', mode='''min''' if '''loss''' in metric else '''max''', patience=A_, verbose=A_, ) class __snake_case ( pl.Callback): def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : str = {f'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__lowerCAmelCase ) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : pl.Trainer , __lowerCAmelCase : pl.LightningModule , __lowerCAmelCase : str , __lowerCAmelCase : Tuple=True ): """simple docstring""" logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) _lowerCamelCase : Any = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} ) # Log results _lowerCamelCase : Optional[Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": _lowerCamelCase : List[str] = od / '''test_results.txt''' _lowerCamelCase : Dict = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. _lowerCamelCase : Tuple = od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' _lowerCamelCase : str = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=__lowerCAmelCase ) generations_file.parent.mkdir(exist_ok=__lowerCAmelCase ) with open(__lowerCAmelCase , '''a+''' ) as writer: for key in sorted(__lowerCAmelCase ): if key in ["log", "progress_bar", "preds"]: continue _lowerCamelCase : int = metrics[key] if isinstance(__lowerCAmelCase , torch.Tensor ): _lowerCamelCase : Dict = val.item() _lowerCamelCase : Any = f'''{key}: {val:.6f}\n''' writer.write(__lowerCAmelCase ) if not save_generations: return if "preds" in metrics: _lowerCamelCase : str = '''\n'''.join(metrics['''preds'''] ) generations_file.open('''w+''' ).write(__lowerCAmelCase ) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : str , __lowerCAmelCase : Any ): """simple docstring""" try: _lowerCamelCase : int = pl_module.model.model.num_parameters() except AttributeError: _lowerCamelCase : List[str] = pl_module.model.num_parameters() _lowerCamelCase : str = count_trainable_parameters(__lowerCAmelCase ) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} ) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : pl.Trainer , __lowerCAmelCase : pl.LightningModule ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__lowerCAmelCase , __lowerCAmelCase , '''test''' ) @rank_zero_only def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : pl.Trainer , __lowerCAmelCase : Optional[Any] ): """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
83
import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase__ = json.loads(open(SCREAMING_SNAKE_CASE ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('''.pt''' ): lowercase__ = args.output + '''.pt''' lowercase__ = OrderedDict() with tf.device('''/CPU:0''' ): lowercase__ = tf.train.load_checkpoint(args.tf_model_dir ) lowercase__ = reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase__ = reader.get_tensor(SCREAMING_SNAKE_CASE ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase__ = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase__ = 8 lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/moe''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase__ = key_name[-9:-7] for i in range(16 ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase__ = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/mlp''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p1/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/ln''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/att''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase__ = state[:, 0, :, :] lowercase__ = state[:, 1, :, :] lowercase__ = state[:, 2, :, :] lowercase__ = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/o/kernel''' ): lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase__ = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/an''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase__ = '''model.%s.weight''' % nlayer lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) if key_name.startswith('''model/wte''' ): lowercase__ = '''lm_head.weight''' lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/wob''' ): lowercase__ = '''final_logits_bias''' lowercase__ = vnp.copy() # same in embedded lowercase__ = state.reshape((1, -1) ) lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense/kernel": lowercase__ = '''model.last_project.weight''' lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense_1/bias": lowercase__ = '''model.last_project.bias''' lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) torch.save(SCREAMING_SNAKE_CASE , args.output ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser( description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model') parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model') lowerCAmelCase = parser.parse_args() convert_tf_gptsan_to_pt(args)
43
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''', '''Salesforce/blip-vqa-capfit-large''': ( '''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json''' ), '''Salesforce/blip-image-captioning-base''': ( '''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json''' ), '''Salesforce/blip-image-captioning-large''': ( '''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json''' ), '''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''', '''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''', '''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''', '''Salesforce/blip-itm-large-flikr''': ( '''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json''' ), } class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : List[str] = """blip_text_model""" def __init__( self , snake_case=3_0524 , snake_case=768 , snake_case=768 , snake_case=3072 , snake_case=768 , snake_case=12 , snake_case=8 , snake_case=512 , snake_case="gelu" , snake_case=1E-12 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=3_0522 , snake_case=2 , snake_case=0 , snake_case=102 , snake_case=True , snake_case=True , **snake_case , ): super().__init__( pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , sep_token_id=snake_case , **snake_case , ) lowercase = vocab_size lowercase = hidden_size lowercase = encoder_hidden_size lowercase = intermediate_size lowercase = projection_dim lowercase = hidden_dropout_prob lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = max_position_embeddings lowercase = layer_norm_eps lowercase = hidden_act lowercase = initializer_range lowercase = attention_probs_dropout_prob lowercase = is_decoder lowercase = use_cache @classmethod def SCREAMING_SNAKE_CASE__ ( cls , snake_case , **snake_case ): cls._set_token_in_kwargs(snake_case ) lowercase , lowercase = cls.get_config_dict(snake_case , **snake_case ) # get the text config dict if we are loading from BlipConfig if config_dict.get('model_type' ) == "blip": lowercase = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case , **snake_case ) class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : Tuple = """blip_vision_model""" def __init__( self , snake_case=768 , snake_case=3072 , snake_case=512 , snake_case=12 , snake_case=12 , snake_case=384 , snake_case=16 , snake_case="gelu" , snake_case=1E-5 , snake_case=0.0 , snake_case=1E-10 , **snake_case , ): super().__init__(**snake_case ) lowercase = hidden_size lowercase = intermediate_size lowercase = projection_dim lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = patch_size lowercase = image_size lowercase = initializer_range lowercase = attention_dropout lowercase = layer_norm_eps lowercase = hidden_act @classmethod def SCREAMING_SNAKE_CASE__ ( cls , snake_case , **snake_case ): cls._set_token_in_kwargs(snake_case ) lowercase , lowercase = cls.get_config_dict(snake_case , **snake_case ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('model_type' ) == "blip": lowercase = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(snake_case , **snake_case ) class A_ ( __lowerCamelCase ): '''simple docstring''' _UpperCamelCase : Optional[int] = """blip""" _UpperCamelCase : Tuple = True def __init__( self , snake_case=None , snake_case=None , snake_case=512 , snake_case=2.6_592 , snake_case=256 , **snake_case , ): super().__init__(**snake_case ) if text_config is None: lowercase = {} logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' ) if vision_config is None: lowercase = {} logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' ) lowercase = BlipTextConfig(**snake_case ) lowercase = BlipVisionConfig(**snake_case ) lowercase = self.vision_config.hidden_size lowercase = projection_dim lowercase = logit_scale_init_value lowercase = 1.0 lowercase = 0.02 lowercase = image_text_hidden_size @classmethod def SCREAMING_SNAKE_CASE__ ( cls , snake_case , snake_case , **snake_case ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case ) def SCREAMING_SNAKE_CASE__ ( self ): lowercase = copy.deepcopy(self.__dict__ ) lowercase = self.text_config.to_dict() lowercase = self.vision_config.to_dict() lowercase = self.__class__.model_type return output
84
from __future__ import annotations def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
from __future__ import annotations def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = [True] * limit SCREAMING_SNAKE_CASE__ : Optional[Any] = False SCREAMING_SNAKE_CASE__ : Dict = False SCREAMING_SNAKE_CASE__ : int = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): SCREAMING_SNAKE_CASE__ : Optional[Any] = i * 2 while index < limit: SCREAMING_SNAKE_CASE__ : Tuple = False SCREAMING_SNAKE_CASE__ : Tuple = index + i SCREAMING_SNAKE_CASE__ : List[str] = [2] for i in range(3 , lowercase__ , 2 ): if is_prime[i]: primes.append(lowercase__ ) return primes def _a ( lowercase__ : int = 1_00_00_00 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = prime_sieve(lowercase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 for i in range(len(lowercase__ ) ): for j in range(i + length , len(lowercase__ ) ): SCREAMING_SNAKE_CASE__ : int = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: SCREAMING_SNAKE_CASE__ : Any = j - i SCREAMING_SNAKE_CASE__ : int = sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
85
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
43
0
import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : List[str] = RobertaTokenizer _lowerCamelCase : Tuple = RobertaTokenizerFast _lowerCamelCase : str = True _lowerCamelCase : Tuple = {'cls_token': '<s>'} def __A ( self : Any ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] A_ = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) ) A_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] A_ = {"unk_token": "<unk>"} A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCAmelCase ) ) def __A ( self : Any , **UpperCAmelCase : Union[str, Any] ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : List[Any] , **UpperCAmelCase : List[str] ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : str , UpperCAmelCase : Optional[int] ): A_ = "lower newer" A_ = "lower newer" return input_text, output_text def __A ( self : Union[str, Any] ): A_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) A_ = "lower newer" A_ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] A_ = tokenizer.tokenize(UpperCAmelCase ) # , add_prefix_space=True) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokens + [tokenizer.unk_token] A_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase ) def __A ( self : Union[str, Any] ): A_ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=UpperCAmelCase ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=UpperCAmelCase ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def __A ( self : int ): A_ = self.tokenizer_class.from_pretrained("roberta-base" ) A_ = tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.encode( "sequence builders" , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase ) A_ = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase ) A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) A_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __A ( self : Tuple ): A_ = self.get_tokenizer() A_ = "Encode this sequence." A_ = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(UpperCAmelCase , UpperCAmelCase ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) A_ = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(UpperCAmelCase , UpperCAmelCase ) # Testing spaces after special tokens A_ = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase )} ) # mask token has a left space A_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase ) A_ = "Encode <mask> sequence" A_ = "Encode <mask>sequence" A_ = tokenizer.encode(UpperCAmelCase ) A_ = encoded.index(UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(UpperCAmelCase , UpperCAmelCase ) A_ = tokenizer.encode(UpperCAmelCase ) A_ = encoded.index(UpperCAmelCase ) A_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Any ): pass def __A ( self : Optional[int] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase ) A_ = "A, <mask> AllenNLP sentence." A_ = tokenizer_r.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) A_ = tokenizer_p.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) A_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) A_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( UpperCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( UpperCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def __A ( self : Union[str, Any] ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): A_ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , UpperCAmelCase ) self.assertEqual(post_processor_state["add_prefix_space"] , UpperCAmelCase ) self.assertEqual(post_processor_state["trim_offsets"] , UpperCAmelCase ) def __A ( self : List[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name` A_ = f'''{text_of_1_token} {text_of_1_token}''' A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase ) + 1, len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase ) + 1, len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase ), len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(UpperCAmelCase ), len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = f''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase ) + 1, 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase ), 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , ) A_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase , use_fast=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase ) A_ = tokenizer_r(UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(UpperCAmelCase ), 1 + len(UpperCAmelCase ) + 1 + len(UpperCAmelCase )) , )
86
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() def lowerCamelCase_ ( self: Dict ) -> Tuple: """simple docstring""" lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting''' lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ ) lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench''' lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = num_samples * [init_image] lowercase__ = num_samples * [mask_image] lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # shard inputs and rng lowercase__ = replicate(UpperCamelCase_ ) lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = pipeline( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ) lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 ) lowercase__ = images[0, 253:256, 253:256, -1] lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
43
0
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING _lowerCamelCase : Tuple = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : str , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]: '''simple docstring''' super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__) requires_backends(self , '''decord''') self.check_model_type(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : List[Any]=None) ->Dict: '''simple docstring''' A__ = {} if frame_sampling_rate is not None: A__ = frame_sampling_rate if num_frames is not None: A__ = num_frames A__ = {} if top_k is not None: A__ = top_k return preprocess_params, {}, postprocess_params def __call__( self : str , UpperCAmelCase__ : Union[str, List[str]] , **UpperCAmelCase__ : Optional[int]) ->Dict: '''simple docstring''' return super().__call__(UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[Any]=1) ->Any: '''simple docstring''' if num_frames is None: A__ = self.model.config.num_frames if video.startswith('''http://''') or video.startswith('''https://'''): A__ = BytesIO(requests.get(UpperCAmelCase__).content) A__ = VideoReader(UpperCAmelCase__) videoreader.seek(0) A__ = 0 A__ = num_frames * frame_sampling_rate - 1 A__ = np.linspace(UpperCAmelCase__ , UpperCAmelCase__ , num=UpperCAmelCase__ , dtype=np.intaa) A__ = videoreader.get_batch(UpperCAmelCase__).asnumpy() A__ = list(UpperCAmelCase__) A__ = self.image_processor(UpperCAmelCase__ , return_tensors=self.framework) return model_inputs def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Dict) ->int: '''simple docstring''' A__ = self.model(**UpperCAmelCase__) return model_outputs def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str=5) ->Optional[int]: '''simple docstring''' if top_k > self.model.config.num_labels: A__ = self.model.config.num_labels if self.framework == "pt": A__ = model_outputs.logits.softmax(-1)[0] A__ , A__ = probs.topk(UpperCAmelCase__) else: raise ValueError(f"""Unsupported framework: {self.framework}""") A__ = scores.tolist() A__ = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase__ , UpperCAmelCase__)]
87
from __future__ import annotations import math def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) return min( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) def _a ( ): """simple docstring""" lowercase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] lowercase__ = math.log(len(SCREAMING_SNAKE_CASE ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
43
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> List[Any]: _lowerCamelCase : Union[str, Any] = parent _lowerCamelCase : int = batch_size _lowerCamelCase : Any = seq_length _lowerCamelCase : List[str] = is_training _lowerCamelCase : List[str] = use_token_type_ids _lowerCamelCase : List[Any] = use_labels _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : Any = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : List[Any] = num_attention_heads _lowerCamelCase : str = intermediate_size _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Optional[int] = hidden_dropout_prob _lowerCamelCase : List[Any] = attention_probs_dropout_prob _lowerCamelCase : Union[str, Any] = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : int = type_sequence_label_size _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : List[str] = num_labels _lowerCamelCase : Tuple = num_choices _lowerCamelCase : Optional[int] = scope _lowerCamelCase : Any = self.vocab_size - 1 def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _lowerCamelCase : int = None if self.use_token_type_ids: _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _lowerCamelCase : List[str] = None _lowerCamelCase : Dict = None _lowerCamelCase : Optional[Any] = None if self.use_labels: _lowerCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices) _lowerCamelCase : str = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) _lowerCamelCase : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : List[Any] = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> Any: _lowerCamelCase : Optional[Any] = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : Any = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) -> Union[str, Any]: _lowerCamelCase : Any = self.num_labels _lowerCamelCase : List[Any] = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : Any = config_and_inputs _lowerCamelCase : Any = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class lowercase__ ( A_ ,A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) __UpperCAmelCase = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly __UpperCAmelCase = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> Union[str, Any]: _lowerCamelCase : List[str] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": _lowerCamelCase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE , ) _lowerCamelCase : Optional[int] = inputs_dict["""labels"""] _lowerCamelCase : Dict = inputs_dict["""labels"""] _lowerCamelCase : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE , ) _lowerCamelCase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE) return inputs_dict def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Optional[int] = OpenAIGPTModelTester(self) _lowerCamelCase : List[str] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37) def UpperCamelCase_ ( self) -> str: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> str: for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : List[Any] = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) @require_torch class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Optional[Any] = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""") model.to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE) # the president is _lowerCamelCase : Union[str, Any] = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 4_0477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the _lowerCamelCase : Optional[Any] = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE) self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE)
88
class _a : def __init__( self: Tuple , UpperCamelCase_: Dict ) -> List[str]: """simple docstring""" lowercase__ = val lowercase__ = None lowercase__ = None def lowerCamelCase_ ( self: Any , UpperCamelCase_: Any ) -> Union[str, Any]: """simple docstring""" if self.val: if val < self.val: if self.left is None: lowercase__ = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: lowercase__ = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: lowercase__ = val def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if root: inorder(root.left , SCREAMING_SNAKE_CASE ) res.append(root.val ) inorder(root.right , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if len(SCREAMING_SNAKE_CASE ) == 0: return arr lowercase__ = Node(arr[0] ) for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): root.insert(arr[i] ) # Traverse BST in order. lowercase__ = [] inorder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
43
0
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCamelCase: def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=2, lowerCamelCase=99, lowerCamelCase=0, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase="last", lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=0, ) -> str: """simple docstring""" _lowercase : Union[str, Any] = parent _lowercase : Optional[Any] = batch_size _lowercase : List[str] = seq_length _lowercase : int = is_training _lowercase : List[str] = use_input_lengths _lowercase : int = use_token_type_ids _lowercase : Any = use_labels _lowercase : Union[str, Any] = gelu_activation _lowercase : List[str] = sinusoidal_embeddings _lowercase : str = causal _lowercase : Optional[int] = asm _lowercase : Union[str, Any] = n_langs _lowercase : List[Any] = vocab_size _lowercase : Any = n_special _lowercase : Any = hidden_size _lowercase : str = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : Tuple = hidden_dropout_prob _lowercase : Optional[int] = attention_probs_dropout_prob _lowercase : Union[str, Any] = max_position_embeddings _lowercase : List[str] = type_sequence_label_size _lowercase : Any = initializer_range _lowercase : int = num_labels _lowercase : Optional[int] = num_choices _lowercase : Optional[Any] = summary_type _lowercase : Optional[Any] = use_proj _lowercase : int = scope _lowercase : List[Any] = bos_token_id def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" _lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) _lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length]) _lowercase : int = None if self.use_input_lengths: _lowercase : Dict = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length _lowercase : Tuple = None if self.use_token_type_ids: _lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.n_langs) _lowercase : Tuple = None _lowercase : int = None _lowercase : int = None if self.use_labels: _lowercase : str = ids_tensor([self.batch_size], self.type_sequence_label_size) _lowercase : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels) _lowercase : Dict = ids_tensor([self.batch_size], 2).float() _lowercase : Tuple = ids_tensor([self.batch_size], self.num_choices) _lowercase : Dict = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" return XLMConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, ) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Tuple: """simple docstring""" _lowercase : List[Any] = XLMModel(config=lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : str = model(lowerCamelCase, lengths=lowerCamelCase, langs=lowerCamelCase) _lowercase : int = model(lowerCamelCase, langs=lowerCamelCase) _lowercase : Any = model(lowerCamelCase) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[Any]: """simple docstring""" _lowercase : Dict = XLMWithLMHeadModel(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : int = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> str: """simple docstring""" _lowercase : Tuple = XLMForQuestionAnsweringSimple(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Dict = model(lowerCamelCase) _lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase) _lowercase : Any = outputs self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Union[str, Any]: """simple docstring""" _lowercase : Tuple = XLMForQuestionAnswering(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Optional[Any] = model(lowerCamelCase) _lowercase : List[Any] = model( lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, p_mask=lowerCamelCase, ) _lowercase : List[str] = model( lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, ) ((_lowercase) , ) : Optional[Any] = result_with_labels.to_tuple() _lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase) ((_lowercase) , ) : Any = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> int: """simple docstring""" _lowercase : Optional[Any] = XLMForSequenceClassification(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Optional[int] = model(lowerCamelCase) _lowercase : Optional[int] = model(lowerCamelCase, labels=lowerCamelCase) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]: """simple docstring""" _lowercase : Any = self.num_labels _lowercase : str = XLMForTokenClassification(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Dict: """simple docstring""" _lowercase : Optional[Any] = self.num_choices _lowercase : Optional[int] = XLMForMultipleChoice(config=lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Optional[Any] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() _lowercase : int = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() _lowercase : Optional[Any] = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() _lowercase : List[str] = model( lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : Dict = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Optional[Any] = config_and_inputs _lowercase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class _lowerCamelCase( _a, _a, _a, unittest.TestCase ): lowercase_ : Any = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) lowercase_ : Optional[int] = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase_ : Union[str, Any] = ( { """feature-extraction""": XLMModel, """fill-mask""": XLMWithLMHeadModel, """question-answering""": XLMForQuestionAnsweringSimple, """text-classification""": XLMForSequenceClassification, """text-generation""": XLMWithLMHeadModel, """token-classification""": XLMForTokenClassification, """zero-shot""": XLMForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast') ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False) -> Optional[int]: """simple docstring""" _lowercase : Any = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": _lowercase : Any = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase) _lowercase : Dict = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase) return inputs_dict def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : Union[str, Any] = XLMModelTester(self) _lowercase : List[str] = ConfigTester(self, config_class=lowerCamelCase, emb_dim=37) def UpperCamelCase ( self) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self) -> List[Any]: """simple docstring""" _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCamelCase) def UpperCamelCase ( self) -> List[str]: """simple docstring""" _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase) def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase) def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCamelCase) def UpperCamelCase ( self) -> List[str]: """simple docstring""" _lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase) def UpperCamelCase ( self) -> Dict: """simple docstring""" _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase) def UpperCamelCase ( self) -> Any: """simple docstring""" _lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> int: """simple docstring""" self.assertIsInstance(lowerCamelCase, lowerCamelCase) self.assertListEqual( [isinstance(lowerCamelCase, lowerCamelCase) for iter_attentions in attentions], [True] * len(lowerCamelCase)) self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(lowerCamelCase): # adds PAD dummy token _lowercase : Dict = min_length + idx + 1 _lowercase : int = min_length + idx + 1 _lowercase : Dict = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(lowerCamelCase)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> List[Any]: """simple docstring""" self.assertIsInstance(lowerCamelCase, lowerCamelCase) self.assertListEqual( [isinstance(lowerCamelCase, lowerCamelCase) for iter_hidden_states in hidden_states], [True] * len(lowerCamelCase), ) self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(lowerCamelCase): # adds PAD dummy token _lowercase : int = min_length + idx + 1 _lowercase : int = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(lowerCamelCase), ) pass @slow def UpperCamelCase ( self) -> int: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Dict = XLMModel.from_pretrained(lowerCamelCase) self.assertIsNotNone(lowerCamelCase) @require_torch class _lowerCamelCase( unittest.TestCase ): @slow def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : Tuple = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048') model.to(lowerCamelCase) _lowercase : Union[str, Any] = torch.tensor([[14, 4_47]], dtype=torch.long, device=lowerCamelCase) # the president _lowercase : Any = [ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference _lowercase : str = model.generate(lowerCamelCase, do_sample=lowerCamelCase) self.assertListEqual(output_ids[0].cpu().numpy().tolist(), lowerCamelCase)
89
lowerCAmelCase = { 'a': 'AAAAA', 'b': 'AAAAB', 'c': 'AAABA', 'd': 'AAABB', 'e': 'AABAA', 'f': 'AABAB', 'g': 'AABBA', 'h': 'AABBB', 'i': 'ABAAA', 'j': 'BBBAA', 'k': 'ABAAB', 'l': 'ABABA', 'm': 'ABABB', 'n': 'ABBAA', 'o': 'ABBAB', 'p': 'ABBBA', 'q': 'ABBBB', 'r': 'BAAAA', 's': 'BAAAB', 't': 'BAABA', 'u': 'BAABB', 'v': 'BBBAB', 'w': 'BABAA', 'x': 'BABAB', 'y': 'BABBA', 'z': 'BABBB', ' ': ' ', } lowerCAmelCase = {value: key for key, value in encode_dict.items()} def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = '''''' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('''encode() accepts only letters of the alphabet and spaces''' ) return encoded def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if set(SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' ) lowercase__ = '''''' for word in coded.split(): while len(SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase__ = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
43
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { '''configuration_squeezebert''': [ '''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SqueezeBertConfig''', '''SqueezeBertOnnxConfig''', ], '''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['''SqueezeBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ '''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SqueezeBertForMaskedLM''', '''SqueezeBertForMultipleChoice''', '''SqueezeBertForQuestionAnswering''', '''SqueezeBertForSequenceClassification''', '''SqueezeBertForTokenClassification''', '''SqueezeBertModel''', '''SqueezeBertModule''', '''SqueezeBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
90
import numpy as np def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
43
0
"""simple docstring""" from __future__ import annotations from typing import Any class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' pass class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int ,A_ : Any ) -> None: A = data A = None def __iter__( self : Tuple ) -> Any: A = self A = [] while node: if node in visited: raise ContainsLoopError visited.append(A_ ) yield node.data A = node.next_node @property def _SCREAMING_SNAKE_CASE ( self : str ) -> bool: try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": _lowercase = Node(1) _lowercase = Node(2) _lowercase = Node(3) _lowercase = Node(4) print(root_node.has_loop) # False _lowercase = root_node.next_node print(root_node.has_loop) # True _lowercase = Node(5) _lowercase = Node(6) _lowercase = Node(5) _lowercase = Node(6) print(root_node.has_loop) # False _lowercase = Node(1) print(root_node.has_loop) # False
91
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = '▁' lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} lowerCAmelCase = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } lowerCAmelCase = {'vinai/bartpho-syllable': 1024} class _a ( UpperCamelCase__ ): _lowercase : Tuple = VOCAB_FILES_NAMES _lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP _lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Any = ['''input_ids''', '''attention_mask'''] def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None: """simple docstring""" lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) lowercase__ = vocab_file lowercase__ = monolingual_vocab_file lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility lowercase__ = {} lowercase__ = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids: lowercase__ = cnt cnt += 1 with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): lowercase__ = line.strip().split()[0] lowercase__ = len(self.fairseq_tokens_to_ids ) if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids: lowercase__ = len(self.fairseq_tokens_to_ids ) lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self: Tuple ) -> int: """simple docstring""" lowercase__ = self.__dict__.copy() lowercase__ = None lowercase__ = self.sp_model.serialized_model_proto() return state def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]: """simple docstring""" lowercase__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__ = {} lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]: """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.fairseq_ids_to_tokens[index] def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict: """simple docstring""" lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: lowercase__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'{str(UpperCamelCase_ )} \n' ) return out_vocab_file, out_monolingual_vocab_file
43
0
'''simple docstring''' from __future__ import annotations import math def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : bool , __magic_name__ : list[int] , __magic_name__ : float ) -> int: if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(__magic_name__ ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , __magic_name__ , __magic_name__ , __magic_name__ ) , minimax(depth + 1 , node_index * 2 + 1 , __magic_name__ , __magic_name__ , __magic_name__ ) , ) return min( minimax(depth + 1 , node_index * 2 , __magic_name__ , __magic_name__ , __magic_name__ ) , minimax(depth + 1 , node_index * 2 + 1 , __magic_name__ , __magic_name__ , __magic_name__ ) , ) def _lowerCAmelCase ( ) -> None: lowercase : Tuple =[90, 23, 6, 33, 21, 65, 123, 34423] lowercase : str =math.log(len(__magic_name__ ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , __magic_name__ , __magic_name__ , __magic_name__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
92
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = original_name.split('''.''' )[0] lowercase__ = key.split('''.''' ) lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] ) lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] ) lowercase__ = orig_block_num - offset lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' ) return key def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = OrderedDict() lowercase__ , lowercase__ = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): lowercase__ = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 lowercase__ = key[: key.find('''proj''' )] lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' ) lowercase__ = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: lowercase__ = '''poolformer.encoder.''' + key if "mlp.fc1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' ) if "norm2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: lowercase__ = key.replace('''head''' , '''classifier''' ) lowercase__ = value return new_state_dict def _a ( ): """simple docstring""" lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return image @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = PoolFormerConfig() # set attributes based on model_name lowercase__ = '''huggingface/label-files''' lowercase__ = model_name[-3:] lowercase__ = 10_00 lowercase__ = '''imagenet-1k-id2label.json''' lowercase__ = (1, 10_00) # set config attributes lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} if size == "s12": lowercase__ = [2, 2, 6, 2] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 0.9 elif size == "s24": lowercase__ = [4, 4, 12, 4] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 0.9 elif size == "s36": lowercase__ = [6, 6, 18, 6] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.9 elif size == "m36": lowercase__ = [6, 6, 18, 6] lowercase__ = [96, 1_92, 3_84, 7_68] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.95 elif size == "m48": lowercase__ = [8, 8, 24, 8] lowercase__ = [96, 1_92, 3_84, 7_68] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.95 else: raise ValueError(f'Size {size} not supported' ) # load image processor lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE ) # Prepare image lowercase__ = prepare_img() lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values logger.info(f'Converting model {model_name}...' ) # load original state dict lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) ) # rename keys lowercase__ = rename_keys(SCREAMING_SNAKE_CASE ) # create HuggingFace model and load state dict lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE ) model.load_state_dict(SCREAMING_SNAKE_CASE ) model.eval() # Define image processor lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE ) lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass lowercase__ = model(SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits # define expected logit slices for different models if size == "s12": lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(f'Size {size} not supported' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 ) # finally, save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) lowerCAmelCase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
43
0
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE ) ->Dict: """simple docstring""" if not head: return True # split the list to two parts lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = head.next, head while fast and fast.next: lowerCAmelCase__ :Any = fast.next.next lowerCAmelCase__ :Any = slow.next lowerCAmelCase__ :List[Any] = slow.next lowerCAmelCase__ :Optional[Any] = None # Don't forget here! But forget still works! # reverse the second part lowerCAmelCase__ :Optional[int] = None while second: lowerCAmelCase__ :Any = second.next lowerCAmelCase__ :Optional[int] = node lowerCAmelCase__ :int = second lowerCAmelCase__ :List[Any] = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False lowerCAmelCase__ :List[str] = node.next lowerCAmelCase__ :Optional[Any] = head.next return True def __A (_SCREAMING_SNAKE_CASE ) ->Any: """simple docstring""" if not head or not head.next: return True # 1. Get the midpoint (slow) lowerCAmelCase__ :str = head while fast and fast.next: lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = fast.next.next, slow.next # 2. Push the second half into the stack lowerCAmelCase__ :Optional[Any] = [slow.val] while slow.next: lowerCAmelCase__ :List[Any] = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False lowerCAmelCase__ :int = cur.next return True def __A (_SCREAMING_SNAKE_CASE ) ->Optional[Any]: """simple docstring""" if not head or not head.next: return True lowerCAmelCase__ :str = {} lowerCAmelCase__ :str = 0 while head: if head.val in d: d[head.val].append(_SCREAMING_SNAKE_CASE ) else: lowerCAmelCase__ :Union[str, Any] = [pos] lowerCAmelCase__ :Any = head.next pos += 1 lowerCAmelCase__ :Dict = pos - 1 lowerCAmelCase__ :int = 0 for v in d.values(): if len(_SCREAMING_SNAKE_CASE ) % 2 != 0: middle += 1 else: lowerCAmelCase__ :Tuple = 0 for i in range(0 , len(_SCREAMING_SNAKE_CASE ) ): if v[i] + v[len(_SCREAMING_SNAKE_CASE ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
93
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowerCAmelCase = logging.getLogger() def _a ( ): """simple docstring""" lowercase__ = argparse.ArgumentParser() parser.add_argument('''-f''' ) lowercase__ = parser.parse_args() return args.f def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = {} lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' ) if os.path.exists(SCREAMING_SNAKE_CASE ): with open(SCREAMING_SNAKE_CASE , '''r''' ) as f: lowercase__ = json.load(SCREAMING_SNAKE_CASE ) else: raise ValueError(f'can\'t find {path}' ) return results def _a ( ): """simple docstring""" lowercase__ = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _a ( UpperCamelCase__ ): @classmethod def lowerCamelCase_ ( cls: int ) -> Any: """simple docstring""" lowercase__ = tempfile.mkdtemp() lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict: """simple docstring""" shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Dict ) -> Optional[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = 7 if get_gpu_count() > 1 else 2 lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) ) @slow def lowerCamelCase_ ( self: Optional[int] ) -> Dict: """simple docstring""" lowercase__ = logging.StreamHandler(sys.stdout ) logger.addHandler(UpperCamelCase_ ) lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
43
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', }, 'tokenizer_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json', }, } SCREAMING_SNAKE_CASE = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } SCREAMING_SNAKE_CASE = '▁' # Segments (not really needed) SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = 1 SCREAMING_SNAKE_CASE = 2 SCREAMING_SNAKE_CASE = 3 SCREAMING_SNAKE_CASE = 4 class UpperCAmelCase_ ( __A ): """simple docstring""" UpperCamelCase_ = VOCAB_FILES_NAMES UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ = '''left''' UpperCamelCase_ = XLNetTokenizer def __init__( self : int , UpperCAmelCase : Dict=None , UpperCAmelCase : str=None , UpperCAmelCase : str=False , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Optional[Any]="<sep>" , UpperCAmelCase : Optional[int]="<pad>" , UpperCAmelCase : Optional[Any]="<cls>" , UpperCAmelCase : Dict="<mask>" , UpperCAmelCase : int=["<eop>", "<eod>"] , **UpperCAmelCase : List[Any] , ) -> List[str]: '''simple docstring''' lowercase : Dict =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token super().__init__( vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , remove_space=UpperCAmelCase , keep_accents=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase , ) lowercase : Tuple =3 lowercase : Union[str, Any] =do_lower_case lowercase : Any =remove_space lowercase : int =keep_accents lowercase : int =vocab_file lowercase : Union[str, Any] =False if not self.vocab_file else True def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' lowercase : Union[str, Any] =[self.sep_token_id] lowercase : Optional[Any] =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' lowercase : Optional[int] =[self.sep_token_id] lowercase : Union[str, Any] =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def A__ ( self : str , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCAmelCase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowercase : Dict =os.path.join( UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ): copyfile(self.vocab_file , UpperCAmelCase ) return (out_vocab_file,)
94
from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = 'T5Config' class _a ( UpperCamelCase__ ): _lowercase : Optional[int] = '''mt5''' _lowercase : str = MTaConfig class _a ( UpperCamelCase__ ): _lowercase : Optional[Any] = '''mt5''' _lowercase : Optional[Any] = MTaConfig class _a ( UpperCamelCase__ ): _lowercase : Tuple = '''mt5''' _lowercase : Optional[Any] = MTaConfig
43
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
95
from datetime import datetime import matplotlib.pyplot as plt import torch def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" for param in module.parameters(): lowercase__ = False def _a ( ): """simple docstring""" lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase__ = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE ) fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE ) fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE ) plt.show() def _a ( ): """simple docstring""" lowercase__ = datetime.now() lowercase__ = current_time.strftime('''%H:%M:%S''' ) return timestamp
43
0
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __A ( unittest.TestCase ): def lowerCamelCase__ ( self : str ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]: torch.manual_seed(0 ) __magic_name__: Dict = UNetaDModel( sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return model @property def lowerCamelCase__ ( self : List[str] ) -> Optional[int]: torch.manual_seed(0 ) __magic_name__: Tuple = UNetaDConditionModel( sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=1_0 , ) return model @property def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]: torch.manual_seed(0 ) __magic_name__: Any = AutoencoderKL( sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , ) __magic_name__: Union[str, Any] = UNetaDModel( sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return vqvae, unet @slow def lowerCamelCase__ ( self : int ) -> Union[str, Any]: __magic_name__: Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator __magic_name__: Dict = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) __magic_name__: Optional[int] = DDPMScheduler() __magic_name__: str = AudioDiffusionPipeline(vqvae=__snake_case , unet=self.dummy_unet , mel=__snake_case , scheduler=__snake_case ) __magic_name__: Any = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) __magic_name__: Optional[Any] = torch.Generator(device=__snake_case ).manual_seed(4_2 ) __magic_name__: Any = pipe(generator=__snake_case , steps=4 ) __magic_name__: Any = output.audios[0] __magic_name__: List[str] = output.images[0] __magic_name__: int = torch.Generator(device=__snake_case ).manual_seed(4_2 ) __magic_name__: List[str] = pipe(generator=__snake_case , steps=4 , return_dict=__snake_case ) __magic_name__: Optional[int] = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) __magic_name__: Tuple = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] __magic_name__: List[str] = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:1_0] __magic_name__: str = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 __magic_name__: Optional[Any] = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) __magic_name__: Optional[Any] = DDIMScheduler() __magic_name__: Optional[Any] = self.dummy_vqvae_and_unet __magic_name__: int = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__snake_case , scheduler=__snake_case ) __magic_name__: str = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) np.random.seed(0 ) __magic_name__: Any = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) __magic_name__: Any = torch.Generator(device=__snake_case ).manual_seed(4_2 ) __magic_name__: Optional[Any] = pipe(raw_audio=__snake_case , generator=__snake_case , start_step=5 , steps=1_0 ) __magic_name__: List[str] = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) __magic_name__: str = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] __magic_name__: Optional[Any] = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 __magic_name__: int = self.dummy_unet_condition __magic_name__: List[Any] = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=__snake_case , mel=__snake_case , scheduler=__snake_case ) __magic_name__: List[Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) np.random.seed(0 ) __magic_name__: Tuple = torch.rand((1, 1, 1_0) ) __magic_name__: Tuple = pipe(generator=__snake_case , encoding=__snake_case ) __magic_name__: int = output.images[0] __magic_name__: List[Any] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] __magic_name__: Optional[int] = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class __A ( unittest.TestCase ): def lowerCamelCase__ ( self : Any ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]: __magic_name__: Dict = torch_device __magic_name__: Dict = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" ) __magic_name__: List[Any] = pipe.to(__snake_case ) pipe.set_progress_bar_config(disable=__snake_case ) __magic_name__: Dict = torch.Generator(device=__snake_case ).manual_seed(4_2 ) __magic_name__: str = pipe(generator=__snake_case ) __magic_name__: str = output.audios[0] __magic_name__: Any = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] __magic_name__: List[str] = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0] __magic_name__: Any = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
96
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _a : def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = mask_ratio lowercase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase__ = (image_size // patch_size) ** 2 lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self: List[str] ) -> str: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = TFViTMAEModel(config=UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]: """simple docstring""" lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) # expected sequence length = num_patches lowercase__ = (self.image_size // self.patch_size) ** 2 lowercase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase__ = 1 lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ ) lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) lowercase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs lowercase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): _lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {} _lowercase : Optional[int] = False _lowercase : List[str] = False _lowercase : Optional[int] = False _lowercase : Optional[int] = False def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" lowercase__ = TFViTMAEModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" pass def lowerCamelCase_ ( self: List[Any] ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self: Optional[int] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase_ ( self: Tuple ) -> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Any: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = outputs_dict[0].numpy() lowercase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(UpperCamelCase_: List[Any] ): lowercase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(UpperCamelCase_ ): lowercase__ = v.numpy() else: lowercase__ = np.array(UpperCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = prepare_numpy_arrays(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str: """simple docstring""" np.random.seed(2 ) lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ = tf.constant(UpperCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase__ = tf_noise super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> Dict: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(UpperCamelCase_ ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ ) } lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ = tf.convert_to_tensor(UpperCamelCase_ ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: lowercase__ = main_layer_class(UpperCamelCase_ ) lowercase__ = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) ) lowercase__ = model(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' ) model.save(UpperCamelCase_ ) lowercase__ = tf.keras.models.load_model( UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(UpperCamelCase_ , tf.keras.Model ) lowercase__ = model(UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": lowercase__ = outputs.last_hidden_state.numpy() lowercase__ = 0 else: lowercase__ = outputs.logits.numpy() lowercase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ ) lowercase__ = model_class.from_pretrained(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": lowercase__ = after_outputs['''last_hidden_state'''].numpy() lowercase__ = 0 else: lowercase__ = after_outputs['''logits'''].numpy() lowercase__ = 0 lowercase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase_ , 1E-5 ) def lowerCamelCase_ ( self: Tuple ) -> List[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(UpperCamelCase_ ) lowercase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowercase__ = model_class.from_config(model.config ) lowercase__ = new_model(UpperCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def lowerCamelCase_ ( self: Optional[int] ) -> str: """simple docstring""" pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def lowerCamelCase_ ( self: Any ) -> Dict: """simple docstring""" pass @slow def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(UpperCamelCase_ ) def _a ( ): """simple docstring""" lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _a ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Tuple ) -> Tuple: """simple docstring""" return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def lowerCamelCase_ ( self: int ) -> Optional[int]: """simple docstring""" np.random.seed(2 ) lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase__ = ViTMAEConfig() lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) # verify the logits lowercase__ = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowercase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
43
0
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[Any] = 'ClapFeatureExtractor' a :List[str] = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int ) -> Tuple: super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __call__( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , **SCREAMING_SNAKE_CASE_ : int ) -> str: lowercase_ = kwargs.pop('''sampling_rate''' , SCREAMING_SNAKE_CASE_ ) if text is None and audios is None: raise ValueError('''You have to specify either text or audios. Both cannot be none.''' ) if text is not None: lowercase_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if audios is not None: lowercase_ = self.feature_extractor( SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if text is not None and audios is not None: lowercase_ = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : str , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : str ) -> Tuple: return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[Any] , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]: return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def _lowercase ( self : List[Any] ) -> Optional[Any]: lowercase_ = self.tokenizer.model_input_names lowercase_ = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
97
def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if (len(SCREAMING_SNAKE_CASE ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : List[str] = logging.get_logger(__name__) lowercase__ : int = { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" _snake_case : List[Any] = 'gpt_neox' def __init__( self : str , lowerCAmelCase__ : Optional[int]=50432 , lowerCAmelCase__ : Optional[int]=6144 , lowerCAmelCase__ : Optional[Any]=44 , lowerCAmelCase__ : Optional[Any]=64 , lowerCAmelCase__ : List[str]=24576 , lowerCAmelCase__ : Any="gelu" , lowerCAmelCase__ : Any=0.25 , lowerCAmelCase__ : str=10000 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : Dict=2048 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : Tuple=1e-5 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Union[str, Any]=2 , lowerCAmelCase__ : int=False , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : int=None , **lowerCAmelCase__ : int , ) -> Tuple: '''simple docstring''' super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = rotary_pct _UpperCamelCase = rotary_emb_base _UpperCamelCase = attention_dropout _UpperCamelCase = hidden_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = use_cache _UpperCamelCase = tie_word_embeddings _UpperCamelCase = use_parallel_residual _UpperCamelCase = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( '''The hidden size is not divisble by the number of attention heads! Make sure to update them!''' ) def snake_case__ ( self : str ) -> Tuple: '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowerCAmelCase__ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f"""got {self.rope_scaling}""" ) _UpperCamelCase = self.rope_scaling.get('''type''' , lowerCAmelCase__ ) _UpperCamelCase = self.rope_scaling.get('''factor''' , lowerCAmelCase__ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
98
from __future__ import annotations def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ , lowercase__ = position lowercase__ = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] lowercase__ = [] for position in positions: lowercase__ , lowercase__ = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(SCREAMING_SNAKE_CASE ) return permissible_positions def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return not any(elem == 0 for row in board for elem in row ) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if is_complete(SCREAMING_SNAKE_CASE ): return True for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ): lowercase__ , lowercase__ = position if board[y][x] == 0: lowercase__ = curr + 1 if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ): return True lowercase__ = 0 return False def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )] for i in range(SCREAMING_SNAKE_CASE ): for j in range(SCREAMING_SNAKE_CASE ): lowercase__ = 1 if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ): return board lowercase__ = 0 lowercase__ = f'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available SCREAMING_SNAKE_CASE = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
99
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n' @dataclass class _a ( UpperCamelCase__ ): _lowercase : Union[PIL.Image.Image, np.ndarray] class _a ( UpperCamelCase__ ): def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]: """simple docstring""" super().__init__() self.register_modules( prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]: """simple docstring""" if latents is None: lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' ) lowercase__ = latents.to(UpperCamelCase_ ) lowercase__ = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) lowercase__ = torch.device(f'cuda:{gpu_id}' ) lowercase__ = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase_ , UpperCamelCase_ ) @property def lowerCamelCase_ ( self: List[Any] ) -> Dict: """simple docstring""" if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(UpperCamelCase_ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any: """simple docstring""" if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ): lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 ) if not isinstance(UpperCamelCase_ , torch.Tensor ): lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ ) lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state'''] lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) if do_classifier_free_guidance: lowercase__ = torch.zeros_like(UpperCamelCase_ ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase__ = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(UpperCamelCase_ ) def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]: """simple docstring""" if isinstance(UpperCamelCase_ , PIL.Image.Image ): lowercase__ = 1 elif isinstance(UpperCamelCase_ , torch.Tensor ): lowercase__ = image.shape[0] elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): lowercase__ = len(UpperCamelCase_ ) else: raise ValueError( f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' ) lowercase__ = self._execution_device lowercase__ = batch_size * num_images_per_prompt lowercase__ = guidance_scale > 1.0 lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # prior self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ ) lowercase__ = self.scheduler.timesteps lowercase__ = self.prior.config.num_embeddings lowercase__ = self.prior.config.embedding_dim lowercase__ = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the latents if we are doing classifier free guidance lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self.prior( UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding # remove the variance lowercase__ , lowercase__ = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: lowercase__ , lowercase__ = noise_pred.chunk(2 ) lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) lowercase__ = self.scheduler.step( UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=UpperCamelCase_ ) lowercase__ = [] for i, latent in enumerate(UpperCamelCase_ ): print() lowercase__ = self.renderer.decode( latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(UpperCamelCase_ ) lowercase__ = torch.stack(UpperCamelCase_ ) if output_type not in ["np", "pil"]: raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' ) lowercase__ = images.cpu().numpy() if output_type == "pil": lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=UpperCamelCase_ )
43
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _A : Any = logging.get_logger(__name__) _A : Union[str, Any] = { """microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""", } class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowerCamelCase__ : str = """focalnet""" def __init__( self , A_=2_24 , A_=4 , A_=3 , A_=96 , A_=False , A_=[1_92, 3_84, 7_68, 7_68] , A_=[2, 2, 6, 2] , A_=[2, 2, 2, 2] , A_=[3, 3, 3, 3] , A_="gelu" , A_=4.0 , A_=0.0 , A_=0.1 , A_=False , A_=1E-4 , A_=False , A_=False , A_=False , A_=0.02 , A_=1E-5 , A_=32 , A_=None , A_=None , **A_ , ): '''simple docstring''' super().__init__(**A_ ) SCREAMING_SNAKE_CASE__ = image_size SCREAMING_SNAKE_CASE__ = patch_size SCREAMING_SNAKE_CASE__ = num_channels SCREAMING_SNAKE_CASE__ = embed_dim SCREAMING_SNAKE_CASE__ = use_conv_embed SCREAMING_SNAKE_CASE__ = hidden_sizes SCREAMING_SNAKE_CASE__ = depths SCREAMING_SNAKE_CASE__ = focal_levels SCREAMING_SNAKE_CASE__ = focal_windows SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = mlp_ratio SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = drop_path_rate SCREAMING_SNAKE_CASE__ = use_layerscale SCREAMING_SNAKE_CASE__ = layerscale_value SCREAMING_SNAKE_CASE__ = use_post_layernorm SCREAMING_SNAKE_CASE__ = use_post_layernorm_in_modulation SCREAMING_SNAKE_CASE__ = normalize_modulator SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = layer_norm_eps SCREAMING_SNAKE_CASE__ = encoder_stride SCREAMING_SNAKE_CASE__ = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_aligned_output_features_output_indices( out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
100
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): def lowerCamelCase_ ( self: Tuple ) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ ) }
43
0
from itertools import count def a__ ( A__ = 5_0 ): SCREAMING_SNAKE_CASE_ : str = [1] * min_block_length for n in count(A__ ): fill_count_functions.append(1 ) for block_length in range(A__, n + 1 ): for block_start in range(n - block_length ): fill_count_functions[n] += fill_count_functions[ n - block_start - block_length - 1 ] fill_count_functions[n] += 1 if fill_count_functions[n] > 1_0_0_0_0_0_0: break return n if __name__ == "__main__": print(F"""{solution() = }""")
101
import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[Any] = DownBlockaD # noqa F405 _lowercase : Dict = '''down''' def lowerCamelCase_ ( self: List[str] ) -> Tuple: """simple docstring""" lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405 _lowercase : Tuple = '''down''' def lowerCamelCase_ ( self: List[Any] ) -> str: """simple docstring""" lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = AttnDownBlockaD # noqa F405 _lowercase : List[Any] = '''down''' def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = CrossAttnDownBlockaD # noqa F405 _lowercase : Optional[int] = '''down''' def lowerCamelCase_ ( self: Optional[Any] ) -> Any: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: str ) -> Tuple: """simple docstring""" lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405 _lowercase : str = '''down''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]: """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = SkipDownBlockaD # noqa F405 _lowercase : Tuple = '''down''' @property def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" return super().get_dummy_input(include_skip_sample=UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> List[Any]: """simple docstring""" lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405 _lowercase : Optional[int] = '''down''' @property def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" return super().get_dummy_input(include_skip_sample=UpperCamelCase_ ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : int = DownEncoderBlockaD # noqa F405 _lowercase : List[Any] = '''down''' @property def lowerCamelCase_ ( self: List[str] ) -> str: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: Any ) -> List[Any]: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''out_channels''': 32, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: str ) -> Dict: """simple docstring""" lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405 _lowercase : int = '''down''' @property def lowerCamelCase_ ( self: Dict ) -> Optional[Any]: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: str ) -> List[str]: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''out_channels''': 32, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405 _lowercase : Union[str, Any] = '''mid''' def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''temb_channels''': 128, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405 _lowercase : str = '''mid''' def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405 _lowercase : str = '''mid''' @property def lowerCamelCase_ ( self: int ) -> List[Any]: """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = UpBlockaD # noqa F405 _lowercase : Any = '''up''' @property def lowerCamelCase_ ( self: str ) -> str: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> List[Any]: """simple docstring""" lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405 _lowercase : List[Any] = '''up''' @property def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]: """simple docstring""" lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = CrossAttnUpBlockaD # noqa F405 _lowercase : List[str] = '''up''' @property def lowerCamelCase_ ( self: int ) -> Any: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Dict ) -> Optional[int]: """simple docstring""" lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405 _lowercase : Dict = '''up''' @property def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnUpBlockaD # noqa F405 _lowercase : Optional[Any] = '''up''' @property def lowerCamelCase_ ( self: Tuple ) -> int: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Dict = SkipUpBlockaD # noqa F405 _lowercase : Optional[int] = '''up''' @property def lowerCamelCase_ ( self: Dict ) -> int: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] ) -> Dict: """simple docstring""" lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnSkipUpBlockaD # noqa F405 _lowercase : str = '''up''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> Dict: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Dict = UpDecoderBlockaD # noqa F405 _lowercase : Tuple = '''up''' @property def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ = {'''in_channels''': 32, '''out_channels''': 32} lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405 _lowercase : str = '''up''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = {'''in_channels''': 32, '''out_channels''': 32} lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: int ) -> Optional[Any]: """simple docstring""" lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] super().test_output(UpperCamelCase_ )
43
0
"""simple docstring""" import string def UpperCamelCase (SCREAMING_SNAKE_CASE ): UpperCamelCase : Union[str, Any] = """""" for i in sequence: UpperCamelCase : Dict = ord(SCREAMING_SNAKE_CASE ) if 65 <= extract <= 90: output += chr(155 - extract ) elif 97 <= extract <= 122: output += chr(219 - extract ) else: output += i return output def UpperCamelCase (SCREAMING_SNAKE_CASE ): UpperCamelCase : Optional[Any] = string.ascii_letters UpperCamelCase : Dict = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(SCREAMING_SNAKE_CASE )] if c in letters else c for c in sequence ) def UpperCamelCase (): from timeit import timeit print("""Running performance benchmarks...""" ) UpperCamelCase : Any = """from string import printable ; from __main__ import atbash, atbash_slow""" print(f"""> atbash_slow(): {timeit("atbash_slow(printable)" , setup=SCREAMING_SNAKE_CASE )} seconds""" ) print(f"""> atbash(): {timeit("atbash(printable)" , setup=SCREAMING_SNAKE_CASE )} seconds""" ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(f'''{example} encrypted in atbash: {atbash(example)}''') benchmark()
102
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowercase__ = set() # Replace all the whitespace in our sentence lowercase__ = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(SCREAMING_SNAKE_CASE ) == 26 def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowercase__ = [False] * 26 for char in input_str: if char.islower(): lowercase__ = True elif char.isupper(): lowercase__ = True return all(SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def _a ( ): """simple docstring""" from timeit import timeit lowercase__ = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE ) ) print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE ) ) print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
43
0
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup snake_case = logging.get_logger(__name__) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : Tuple , **__lowerCamelCase : Any ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : int ): """simple docstring""" _snake_case = [] _snake_case = [] _snake_case = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag _snake_case = parent.find_all(child.name , recursive=__lowerCamelCase ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(__lowerCamelCase ) else next(i for i, s in enumerate(__lowerCamelCase , 1 ) if s is child ) ) _snake_case = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ): """simple docstring""" _snake_case = BeautifulSoup(__lowerCamelCase , '''html.parser''' ) _snake_case = [] _snake_case = [] _snake_case = [] for element in html_code.descendants: if type(__lowerCamelCase ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue _snake_case = html.unescape(__lowerCamelCase ).strip() if not text_in_this_tag: continue all_doc_strings.append(__lowerCamelCase ) _snake_case , _snake_case = self.xpath_soup(__lowerCamelCase ) stringaxtag_seq.append(__lowerCamelCase ) stringaxsubs_seq.append(__lowerCamelCase ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(__lowerCamelCase ) != len(__lowerCamelCase ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def __UpperCAmelCase ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Any ): """simple docstring""" _snake_case = '''''' for tagname, subs in zip(__lowerCamelCase , __lowerCamelCase ): xpath += f"""/{tagname}""" if subs != 0: xpath += f"""[{subs}]""" return xpath def __call__( self : Optional[Any] , __lowerCamelCase : Tuple ): """simple docstring""" _snake_case = False # Check that strings has a valid type if isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = True elif isinstance(__lowerCamelCase , (list, tuple) ): if len(__lowerCamelCase ) == 0 or isinstance(html_strings[0] , __lowerCamelCase ): _snake_case = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' f"""but is of type {type(__lowerCamelCase )}.""" ) _snake_case = bool(isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(html_strings[0] , __lowerCamelCase )) ) if not is_batched: _snake_case = [html_strings] # Get nodes + xpaths _snake_case = [] _snake_case = [] for html_string in html_strings: _snake_case , _snake_case , _snake_case = self.get_three_from_single(__lowerCamelCase ) nodes.append(__lowerCamelCase ) _snake_case = [] for node, tag_list, sub_list in zip(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): _snake_case = self.construct_xpath(__lowerCamelCase , __lowerCamelCase ) xpath_strings.append(__lowerCamelCase ) xpaths.append(__lowerCamelCase ) # return as Dict _snake_case = {'''nodes''': nodes, '''xpaths''': xpaths} _snake_case = BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase ) return encoded_inputs
103
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length, 2) , SCREAMING_SNAKE_CASE ) else: lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length) , SCREAMING_SNAKE_CASE ) for i, tensor in enumerate(SCREAMING_SNAKE_CASE ): if padding_side == "right": if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = tensor[:sequence_length] else: lowercase__ = tensor[:sequence_length] else: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = tensor[:sequence_length] else: lowercase__ = tensor[:sequence_length] return out_tensor.tolist() def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = ord(SCREAMING_SNAKE_CASE ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True lowercase__ = unicodedata.category(SCREAMING_SNAKE_CASE ) if cat.startswith('''P''' ): return True return False @dataclass class _a ( UpperCamelCase__ ): _lowercase : PreTrainedTokenizerBase _lowercase : Union[bool, str, PaddingStrategy] = True _lowercase : Optional[int] = None _lowercase : Optional[int] = None _lowercase : int = -100 _lowercase : str = "pt" def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ) -> List[Any]: """simple docstring""" import torch lowercase__ = '''label''' if '''label''' in features[0].keys() else '''labels''' lowercase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None lowercase__ = self.tokenizer.pad( UpperCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch lowercase__ = torch.tensor(batch['''entity_ids'''] ).shape[1] lowercase__ = self.tokenizer.padding_side if padding_side == "right": lowercase__ = [ list(UpperCamelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) for label in labels ] else: lowercase__ = [ [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) + list(UpperCamelCase_ ) for label in labels ] lowercase__ = [feature['''ner_tags'''] for feature in features] lowercase__ = padding_tensor(UpperCamelCase_ , -1 , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = [feature['''original_entity_spans'''] for feature in features] lowercase__ = padding_tensor(UpperCamelCase_ , (-1, -1) , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = {k: torch.tensor(UpperCamelCase_ , dtype=torch.intaa ) for k, v in batch.items()} return batch
43
0
"""simple docstring""" def _lowerCamelCase ( UpperCAmelCase_ : Optional[Any] ) -> Dict: """simple docstring""" A__ = [] A__ = set({"(", "[", "{"} ) A__ = set({")", "]", "}"} ) A__ = {"{": "}", "[": "]", "(": ")"} for i in range(len(UpperCAmelCase_ ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(UpperCAmelCase_ ) == 0 or (len(UpperCAmelCase_ ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(UpperCAmelCase_ ) == 0 def _lowerCamelCase ( ) -> str: """simple docstring""" A__ = input("Enter sequence of brackets: " ) if is_balanced(UpperCAmelCase_ ): print(UpperCAmelCase_, "is balanced" ) else: print(UpperCAmelCase_, "is not balanced" ) if __name__ == "__main__": main()
104
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _a ( UpperCamelCase__ ): def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]: """simple docstring""" super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) lowercase__ = eval_examples lowercase__ = post_process_function def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]: """simple docstring""" lowercase__ = gen_kwargs.copy() lowercase__ = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length ) lowercase__ = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams ) lowercase__ = gen_kwargs lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset lowercase__ = self.get_eval_dataloader(UpperCamelCase_ ) lowercase__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowercase__ = self.compute_metrics lowercase__ = None lowercase__ = time.time() lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowercase__ = eval_loop( UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , ) finally: lowercase__ = compute_metrics lowercase__ = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): lowercase__ = metrics.pop(UpperCamelCase_ ) metrics.update(output.metrics ) else: lowercase__ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCamelCase_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ ) return metrics def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = gen_kwargs.copy() lowercase__ = self.get_test_dataloader(UpperCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. lowercase__ = self.compute_metrics lowercase__ = None lowercase__ = time.time() lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowercase__ = eval_loop( UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , ) finally: lowercase__ = compute_metrics lowercase__ = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' ) lowercase__ = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): lowercase__ = metrics.pop(UpperCamelCase_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
43
0
from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCamelCase__ : Tuple = {'''tokenization_byt5''': ['''ByT5Tokenizer''']} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys UpperCamelCase__ : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
105
import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase__ = json.loads(open(SCREAMING_SNAKE_CASE ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('''.pt''' ): lowercase__ = args.output + '''.pt''' lowercase__ = OrderedDict() with tf.device('''/CPU:0''' ): lowercase__ = tf.train.load_checkpoint(args.tf_model_dir ) lowercase__ = reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase__ = reader.get_tensor(SCREAMING_SNAKE_CASE ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase__ = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase__ = 8 lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/moe''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase__ = key_name[-9:-7] for i in range(16 ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase__ = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/mlp''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p1/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/ln''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/att''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase__ = state[:, 0, :, :] lowercase__ = state[:, 1, :, :] lowercase__ = state[:, 2, :, :] lowercase__ = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/o/kernel''' ): lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase__ = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/an''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase__ = '''model.%s.weight''' % nlayer lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) if key_name.startswith('''model/wte''' ): lowercase__ = '''lm_head.weight''' lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/wob''' ): lowercase__ = '''final_logits_bias''' lowercase__ = vnp.copy() # same in embedded lowercase__ = state.reshape((1, -1) ) lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense/kernel": lowercase__ = '''model.last_project.weight''' lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense_1/bias": lowercase__ = '''model.last_project.bias''' lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) torch.save(SCREAMING_SNAKE_CASE , args.output ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser( description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model') parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model') lowerCAmelCase = parser.parse_args() convert_tf_gptsan_to_pt(args)
43
0
from __future__ import annotations from collections.abc import Callable from typing import Any, Generic, TypeVar __snake_case :Any =TypeVar('T') class lowerCAmelCase__ ( Generic[T] ): def __init__( self : Union[str, Any] , __UpperCamelCase : list[T] , __UpperCamelCase : Callable[[T, T], T] ) -> None: A = None A = len(__UpperCamelCase ) A = [any_type for _ in range(self.N )] + arr A = fnc self.build() def __UpperCamelCase ( self : List[str] ) -> None: for p in range(self.N - 1 , 0 , -1 ): A = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def __UpperCamelCase ( self : int , __UpperCamelCase : int , __UpperCamelCase : T ) -> None: p += self.N A = v while p > 1: A = p // 2 A = self.fn(self.st[p * 2] , self.st[p * 2 + 1] ) def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int ) -> T | None: # noqa: E741 A , A = l + self.N, r + self.N A = None while l <= r: if l % 2 == 1: A = self.st[l] if res is None else self.fn(__UpperCamelCase , self.st[l] ) if r % 2 == 0: A = self.st[r] if res is None else self.fn(__UpperCamelCase , self.st[r] ) A , A = (l + 1) // 2, (r - 1) // 2 return res if __name__ == "__main__": from functools import reduce __snake_case :int =[1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12] __snake_case :int ={ 0: 7, 1: 2, 2: 6, 3: -14, 4: 5, 5: 4, 6: 7, 7: -10, 8: 9, 9: 10, 10: 12, 11: 1, } __snake_case :int =SegmentTree(test_array, min) __snake_case :List[str] =SegmentTree(test_array, max) __snake_case :Any =SegmentTree(test_array, lambda a, b: a + b) def lowerCamelCase_ ( ) -> None: '''simple docstring''' for i in range(len(lowerCAmelCase__ ) ): for j in range(lowerCAmelCase__ , len(lowerCAmelCase__ ) ): A = reduce(lowerCAmelCase__ , test_array[i : j + 1] ) A = reduce(lowerCAmelCase__ , test_array[i : j + 1] ) A = reduce(lambda lowerCAmelCase__ , lowerCAmelCase__ : a + b , test_array[i : j + 1] ) assert min_range == min_segment_tree.query(lowerCAmelCase__ , lowerCAmelCase__ ) assert max_range == max_segment_tree.query(lowerCAmelCase__ , lowerCAmelCase__ ) assert sum_range == sum_segment_tree.query(lowerCAmelCase__ , lowerCAmelCase__ ) test_all_segments() for index, value in test_updates.items(): __snake_case :Optional[Any] =value min_segment_tree.update(index, value) max_segment_tree.update(index, value) sum_segment_tree.update(index, value) test_all_segments()
106
from __future__ import annotations def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
'''simple docstring''' from typing import TYPE_CHECKING from ....utils import _LazyModule _UpperCAmelCase : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']} if TYPE_CHECKING: from .tokenization_tapex import TapexTokenizer else: import sys _UpperCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
107
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
43
0
import unittest from transformers import DebertaVaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaVaForMaskedLM, DebertaVaForMultipleChoice, DebertaVaForQuestionAnswering, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaModel, ) from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ): '''simple docstring''' def __init__( self : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : Optional[Any]=13 , lowerCamelCase : List[Any]=7 , lowerCamelCase : List[Any]=True , lowerCamelCase : str=True , lowerCamelCase : List[str]=True , lowerCamelCase : List[str]=True , lowerCamelCase : Optional[int]=99 , lowerCamelCase : int=32 , lowerCamelCase : str=5 , lowerCamelCase : str=4 , lowerCamelCase : List[str]=37 , lowerCamelCase : str="gelu" , lowerCamelCase : Dict=0.1 , lowerCamelCase : Any=0.1 , lowerCamelCase : Union[str, Any]=512 , lowerCamelCase : Optional[int]=16 , lowerCamelCase : Any=2 , lowerCamelCase : Any=0.02 , lowerCamelCase : List[Any]=False , lowerCamelCase : Optional[Any]=True , lowerCamelCase : Optional[int]="None" , lowerCamelCase : Tuple=3 , lowerCamelCase : int=4 , lowerCamelCase : Optional[Any]=None , ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = relative_attention _UpperCAmelCase = position_biased_input _UpperCAmelCase = pos_att_type _UpperCAmelCase = scope def lowerCamelCase ( self : str ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase ( self : Dict ) -> Dict: """simple docstring""" return DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : List[Any] ) -> Union[str, Any]: """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Any ) -> List[Any]: """simple docstring""" _UpperCAmelCase = DebertaVaModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase )[0] _UpperCAmelCase = model(lowerCamelCase , token_type_ids=lowerCamelCase )[0] _UpperCAmelCase = model(lowerCamelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def lowerCamelCase ( self : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : List[Any] ) -> List[str]: """simple docstring""" _UpperCAmelCase = DebertaVaForMaskedLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase ( self : str , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Dict , lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : Any ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = DebertaVaForSequenceClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCamelCase ) def lowerCamelCase ( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.num_labels _UpperCAmelCase = DebertaVaForTokenClassification(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase ( self : Any , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : str ) -> Tuple: """simple docstring""" _UpperCAmelCase = DebertaVaForQuestionAnswering(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _UpperCAmelCase = model( lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase ( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : str ) -> str: """simple docstring""" _UpperCAmelCase = DebertaVaForMultipleChoice(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase ( self : List[str] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase = ( ( DebertaVaModel, DebertaVaForMaskedLM, DebertaVaForSequenceClassification, DebertaVaForTokenClassification, DebertaVaForQuestionAnswering, DebertaVaForMultipleChoice, ) if is_torch_available() else () ) _lowerCamelCase = ( { '''feature-extraction''': DebertaVaModel, '''fill-mask''': DebertaVaForMaskedLM, '''question-answering''': DebertaVaForQuestionAnswering, '''text-classification''': DebertaVaForSequenceClassification, '''token-classification''': DebertaVaForTokenClassification, '''zero-shot''': DebertaVaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase = True _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def lowerCamelCase ( self : int ) -> List[str]: """simple docstring""" _UpperCAmelCase = DebertaVaModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 ) def lowerCamelCase ( self : Optional[int] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase ( self : Tuple ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCamelCase ) def lowerCamelCase ( self : List[Any] ) -> Any: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCamelCase ) def lowerCamelCase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCamelCase ) def lowerCamelCase ( self : Tuple ) -> Tuple: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCamelCase ) def lowerCamelCase ( self : Dict ) -> List[str]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCamelCase ) def lowerCamelCase ( self : int ) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCamelCase ) @slow def lowerCamelCase ( self : Dict ) -> Optional[int]: """simple docstring""" for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = DebertaVaModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason="""Model not available yet""" ) def lowerCamelCase ( self : Any ) -> Tuple: """simple docstring""" pass @slow def lowerCamelCase ( self : Union[str, Any] ) -> int: """simple docstring""" _UpperCAmelCase = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" ) _UpperCAmelCase = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ) _UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _UpperCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase )[0] # compare the actual values for a slice. _UpperCAmelCase = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
108
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() def lowerCamelCase_ ( self: Dict ) -> Tuple: """simple docstring""" lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting''' lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ ) lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench''' lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = num_samples * [init_image] lowercase__ = num_samples * [mask_image] lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # shard inputs and rng lowercase__ = replicate(UpperCamelCase_ ) lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = pipeline( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ) lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 ) lowercase__ = images[0, 253:256, 253:256, -1] lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
43
0
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __a ( _snake_case ): __UpperCamelCase : UNetaDModel __UpperCamelCase : KarrasVeScheduler def __init__( self : Tuple ,lowerCamelCase : UNetaDModel ,lowerCamelCase : KarrasVeScheduler ): '''simple docstring''' super().__init__() self.register_modules(unet=lowerCamelCase ,scheduler=lowerCamelCase ) @torch.no_grad() def __call__( self : Union[str, Any] ,lowerCamelCase : int = 1 ,lowerCamelCase : int = 50 ,lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase : Optional[str] = "pil" ,lowerCamelCase : bool = True ,**lowerCamelCase : Tuple ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.unet.config.sample_size __SCREAMING_SNAKE_CASE = (batch_size, 3, img_size, img_size) __SCREAMING_SNAKE_CASE = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) __SCREAMING_SNAKE_CASE = randn_tensor(lowerCamelCase ,generator=lowerCamelCase ,device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(lowerCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper __SCREAMING_SNAKE_CASE = self.scheduler.schedule[t] __SCREAMING_SNAKE_CASE = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.scheduler.add_noise_to_input(lowerCamelCase ,lowerCamelCase ,generator=lowerCamelCase ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. __SCREAMING_SNAKE_CASE = (sigma_hat / 2) * model((sample_hat + 1) / 2 ,sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev __SCREAMING_SNAKE_CASE = self.scheduler.step(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. __SCREAMING_SNAKE_CASE = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 ,sigma_prev / 2 ).sample __SCREAMING_SNAKE_CASE = self.scheduler.step_correct( lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,step_output.prev_sample ,step_output["""derivative"""] ,) __SCREAMING_SNAKE_CASE = step_output.prev_sample __SCREAMING_SNAKE_CASE = (sample / 2 + 0.5).clamp(0 ,1 ) __SCREAMING_SNAKE_CASE = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy() if output_type == "pil": __SCREAMING_SNAKE_CASE = self.numpy_to_pil(lowerCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase )
109
from __future__ import annotations import math def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) return min( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) def _a ( ): """simple docstring""" lowercase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] lowercase__ = math.log(len(SCREAMING_SNAKE_CASE ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
43
0
"""simple docstring""" def lowerCamelCase ( _snake_case ): UpperCAmelCase__ : Any = len(_snake_case ) UpperCAmelCase__ : Optional[Any] = sum(_snake_case ) UpperCAmelCase__ : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 ,n + 1 ): UpperCAmelCase__ : Union[str, Any] = True for i in range(1 ,s + 1 ): UpperCAmelCase__ : Optional[int] = False for i in range(1 ,n + 1 ): for j in range(1 ,s + 1 ): UpperCAmelCase__ : Tuple = dp[i][j - 1] if arr[i - 1] <= j: UpperCAmelCase__ : List[Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) ,-1 ,-1 ): if dp[n][j] is True: UpperCAmelCase__ : Dict = s - 2 * j break return diff
110
class _a : def __init__( self: Tuple , UpperCamelCase_: Dict ) -> List[str]: """simple docstring""" lowercase__ = val lowercase__ = None lowercase__ = None def lowerCamelCase_ ( self: Any , UpperCamelCase_: Any ) -> Union[str, Any]: """simple docstring""" if self.val: if val < self.val: if self.left is None: lowercase__ = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: lowercase__ = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: lowercase__ = val def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if root: inorder(root.left , SCREAMING_SNAKE_CASE ) res.append(root.val ) inorder(root.right , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if len(SCREAMING_SNAKE_CASE ) == 0: return arr lowercase__ = Node(arr[0] ) for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): root.insert(arr[i] ) # Traverse BST in order. lowercase__ = [] inorder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
43
0
from collections import deque from .hash_table import HashTable class __magic_name__ (UpperCamelCase__ ): '''simple docstring''' def __init__( self:Dict , *_a:Any , **_a:Tuple ): super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) def SCREAMING_SNAKE_CASE__ ( self:Any , _a:List[str] , _a:str ): snake_case__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase_ ) snake_case__ = self.values[key] def SCREAMING_SNAKE_CASE__ ( self:Union[str, Any] ): return ( sum(self.charge_factor - len(UpperCamelCase_ ) for slot in self.values ) / self.size_table * self.charge_factor ) def SCREAMING_SNAKE_CASE__ ( self:Optional[Any] , _a:Optional[Any] , _a:List[str]=None ): if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase_ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase_ , UpperCamelCase_ )
33
lowerCAmelCase = { 'a': 'AAAAA', 'b': 'AAAAB', 'c': 'AAABA', 'd': 'AAABB', 'e': 'AABAA', 'f': 'AABAB', 'g': 'AABBA', 'h': 'AABBB', 'i': 'ABAAA', 'j': 'BBBAA', 'k': 'ABAAB', 'l': 'ABABA', 'm': 'ABABB', 'n': 'ABBAA', 'o': 'ABBAB', 'p': 'ABBBA', 'q': 'ABBBB', 'r': 'BAAAA', 's': 'BAAAB', 't': 'BAABA', 'u': 'BAABB', 'v': 'BBBAB', 'w': 'BABAA', 'x': 'BABAB', 'y': 'BABBA', 'z': 'BABBB', ' ': ' ', } lowerCAmelCase = {value: key for key, value in encode_dict.items()} def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = '''''' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('''encode() accepts only letters of the alphabet and spaces''' ) return encoded def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if set(SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' ) lowercase__ = '''''' for word in coded.split(): while len(SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase__ = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
43
0
'''simple docstring''' import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder a : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name a : Optional[Any] = 256 class a ( UpperCamelCase__ ): snake_case_ = ['''melgan'''] def __init__( self : List[Any] , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ): super().__init__() # From MELGAN snake_case_ = math.log(1e-5 ) # Matches MelGAN training. snake_case_ = 4.0 # Largest value for most examples snake_case_ = 128 self.register_modules( notes_encoder=UpperCamelCase_ , continuous_encoder=UpperCamelCase_ , decoder=UpperCamelCase_ , scheduler=UpperCamelCase_ , melgan=UpperCamelCase_ , ) def A_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ): snake_case_ ,snake_case_ = output_range if clip: snake_case_ = torch.clip(UpperCamelCase_ , self.min_value , self.max_value ) # Scale to [0, 1]. snake_case_ = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def A_ ( self : List[str] , lowercase_ : List[str] , lowercase_ : Optional[int]=(-1.0, 1.0) , lowercase_ : List[str]=False ): snake_case_ ,snake_case_ = input_range snake_case_ = torch.clip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if clip else outputs # Scale to [0, 1]. snake_case_ = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def A_ ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[int] ): snake_case_ = input_tokens > 0 snake_case_ ,snake_case_ = self.notes_encoder( encoder_input_tokens=UpperCamelCase_ , encoder_inputs_mask=UpperCamelCase_ ) snake_case_ ,snake_case_ = self.continuous_encoder( encoder_inputs=UpperCamelCase_ , encoder_inputs_mask=UpperCamelCase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def A_ ( self : Tuple , lowercase_ : Any , lowercase_ : str , lowercase_ : Optional[Any] ): snake_case_ = noise_time if not torch.is_tensor(UpperCamelCase_ ): snake_case_ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(UpperCamelCase_ ) and len(timesteps.shape ) == 0: snake_case_ = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML snake_case_ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) snake_case_ = self.decoder( encodings_and_masks=UpperCamelCase_ , decoder_input_tokens=UpperCamelCase_ , decoder_noise_time=UpperCamelCase_ ) return logits @torch.no_grad() def __call__( self : Union[str, Any] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 100 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(UpperCamelCase_ )}." ) snake_case_ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) snake_case_ = np.zeros([1, 0, self.n_dims] , np.floataa ) snake_case_ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device ) for i, encoder_input_tokens in enumerate(UpperCamelCase_ ): if i == 0: snake_case_ = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. snake_case_ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. snake_case_ = ones snake_case_ = self.scale_features( UpperCamelCase_ , output_range=[-1.0, 1.0] , clip=UpperCamelCase_ ) snake_case_ = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCamelCase_ , continuous_mask=UpperCamelCase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop snake_case_ = randn_tensor( shape=encoder_continuous_inputs.shape , generator=UpperCamelCase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): snake_case_ = self.decode( encodings_and_masks=UpperCamelCase_ , input_tokens=UpperCamelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 snake_case_ = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample snake_case_ = self.scale_to_features(UpperCamelCase_ , input_range=[-1.0, 1.0] ) snake_case_ = mel[:1] snake_case_ = mel.cpu().float().numpy() snake_case_ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCamelCase_ , UpperCamelCase_ ) logger.info('''Generated segment''' , UpperCamelCase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": snake_case_ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: snake_case_ = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=UpperCamelCase_ )
640
import numpy as np def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
43
0
import functools def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ): __magic_name__ : Optional[Any] =len(lowerCamelCase ) __magic_name__ : Optional[int] =len(lowerCamelCase ) @functools.cache def min_distance(lowerCamelCase , lowerCamelCase ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa __magic_name__ : Dict =int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowerCamelCase ) , 1 + min_distance(lowerCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
21
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = '▁' lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} lowerCAmelCase = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } lowerCAmelCase = {'vinai/bartpho-syllable': 1024} class _a ( UpperCamelCase__ ): _lowercase : Tuple = VOCAB_FILES_NAMES _lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP _lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Any = ['''input_ids''', '''attention_mask'''] def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None: """simple docstring""" lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) lowercase__ = vocab_file lowercase__ = monolingual_vocab_file lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility lowercase__ = {} lowercase__ = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids: lowercase__ = cnt cnt += 1 with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): lowercase__ = line.strip().split()[0] lowercase__ = len(self.fairseq_tokens_to_ids ) if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids: lowercase__ = len(self.fairseq_tokens_to_ids ) lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self: Tuple ) -> int: """simple docstring""" lowercase__ = self.__dict__.copy() lowercase__ = None lowercase__ = self.sp_model.serialized_model_proto() return state def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]: """simple docstring""" lowercase__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__ = {} lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]: """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.fairseq_ids_to_tokens[index] def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict: """simple docstring""" lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: lowercase__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'{str(UpperCamelCase_ )} \n' ) return out_vocab_file, out_monolingual_vocab_file
43
0
'''simple docstring''' import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" A__ : List[str] = XLMTokenizer A__ : str = False def a__ ( self ) -> Any: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A: List[Any] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] A: Optional[Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) A: Tuple = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] A: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) A: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(UpperCamelCase_ ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(UpperCamelCase_ ) ) def a__ ( self , A ) -> Tuple: A: Tuple = """lower newer""" A: List[Any] = """lower newer""" return input_text, output_text def a__ ( self ) -> int: A: Dict = XLMTokenizer(self.vocab_file , self.merges_file ) A: Union[str, Any] = """lower""" A: int = ["""low""", """er</w>"""] A: List[Any] = tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) A: Any = tokens + ["""<unk>"""] A: Dict = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ ) @slow def a__ ( self ) -> str: A: Any = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""" ) A: Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCamelCase_ ) A: Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCamelCase_ ) A: Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ ) A: List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
135
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = original_name.split('''.''' )[0] lowercase__ = key.split('''.''' ) lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] ) lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] ) lowercase__ = orig_block_num - offset lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' ) return key def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = OrderedDict() lowercase__ , lowercase__ = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): lowercase__ = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 lowercase__ = key[: key.find('''proj''' )] lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' ) lowercase__ = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: lowercase__ = '''poolformer.encoder.''' + key if "mlp.fc1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' ) if "norm2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: lowercase__ = key.replace('''head''' , '''classifier''' ) lowercase__ = value return new_state_dict def _a ( ): """simple docstring""" lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return image @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = PoolFormerConfig() # set attributes based on model_name lowercase__ = '''huggingface/label-files''' lowercase__ = model_name[-3:] lowercase__ = 10_00 lowercase__ = '''imagenet-1k-id2label.json''' lowercase__ = (1, 10_00) # set config attributes lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} if size == "s12": lowercase__ = [2, 2, 6, 2] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 0.9 elif size == "s24": lowercase__ = [4, 4, 12, 4] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 0.9 elif size == "s36": lowercase__ = [6, 6, 18, 6] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.9 elif size == "m36": lowercase__ = [6, 6, 18, 6] lowercase__ = [96, 1_92, 3_84, 7_68] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.95 elif size == "m48": lowercase__ = [8, 8, 24, 8] lowercase__ = [96, 1_92, 3_84, 7_68] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.95 else: raise ValueError(f'Size {size} not supported' ) # load image processor lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE ) # Prepare image lowercase__ = prepare_img() lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values logger.info(f'Converting model {model_name}...' ) # load original state dict lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) ) # rename keys lowercase__ = rename_keys(SCREAMING_SNAKE_CASE ) # create HuggingFace model and load state dict lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE ) model.load_state_dict(SCREAMING_SNAKE_CASE ) model.eval() # Define image processor lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE ) lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass lowercase__ = model(SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits # define expected logit slices for different models if size == "s12": lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(f'Size {size} not supported' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 ) # finally, save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) lowerCAmelCase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
43
0
from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __A = Lock() def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a ) -> Any: """simple docstring""" global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(__a ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowerCamelCase__: Tuple =rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowerCamelCase__: Any =min(__a , __a ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(__a ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowerCamelCase__: List[Any] =lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowerCamelCase__: Optional[Any] =max(__a , __a ) # after all swaps are performed, send the values back to main result_pipe[1].send(__a ) def lowerCAmelCase_ ( __a ) -> int: """simple docstring""" lowerCamelCase__: int =[] lowerCamelCase__: List[str] =[] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop lowerCamelCase__: List[str] =Pipe() lowerCamelCase__: Dict =Pipe() process_array_.append( Process( target=__a , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowerCamelCase__: Tuple =temp_rs lowerCamelCase__: str =temp_rr for i in range(1 , len(__a ) - 1 ): lowerCamelCase__: List[str] =Pipe() lowerCamelCase__: int =Pipe() process_array_.append( Process( target=__a , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowerCamelCase__: int =temp_rs lowerCamelCase__: Tuple =temp_rr process_array_.append( Process( target=__a , args=( len(__a ) - 1, arr[len(__a ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(__a ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(__a ) ): lowerCamelCase__: Dict =result_pipe[p][0].recv() process_array_[p].join() return arr def lowerCAmelCase_ ( ) -> int: """simple docstring""" lowerCamelCase__: List[str] =list(range(10 , 0 , -1 ) ) print("Initial List" ) print(*__a ) lowerCamelCase__: Any =odd_even_transposition(__a ) print("Sorted List\n" ) print(*__a ) if __name__ == "__main__": main()
59
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowerCAmelCase = logging.getLogger() def _a ( ): """simple docstring""" lowercase__ = argparse.ArgumentParser() parser.add_argument('''-f''' ) lowercase__ = parser.parse_args() return args.f def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = {} lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' ) if os.path.exists(SCREAMING_SNAKE_CASE ): with open(SCREAMING_SNAKE_CASE , '''r''' ) as f: lowercase__ = json.load(SCREAMING_SNAKE_CASE ) else: raise ValueError(f'can\'t find {path}' ) return results def _a ( ): """simple docstring""" lowercase__ = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _a ( UpperCamelCase__ ): @classmethod def lowerCamelCase_ ( cls: int ) -> Any: """simple docstring""" lowercase__ = tempfile.mkdtemp() lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict: """simple docstring""" shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Dict ) -> Optional[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = 7 if get_gpu_count() > 1 else 2 lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) ) @slow def lowerCamelCase_ ( self: Optional[int] ) -> Dict: """simple docstring""" lowercase__ = logging.StreamHandler(sys.stdout ) logger.addHandler(UpperCamelCase_ ) lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
43
0
from __future__ import annotations from fractions import Fraction def a ( lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def a ( lowerCamelCase_ ): '''simple docstring''' lowercase__ = [] lowercase__ = 11 lowercase__ = int('''1''' + '''0''' * digit_len ) for num in range(lowerCamelCase_ , lowerCamelCase_ ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(lowerCamelCase_ , lowerCamelCase_ ): solutions.append(F"""{num}/{den}""" ) den += 1 num += 1 lowercase__ = 10 return solutions def a ( lowerCamelCase_ = 2 ): '''simple docstring''' lowercase__ = 1.0 for fraction in fraction_list(lowerCamelCase_ ): lowercase__ = Fraction(lowerCamelCase_ ) result *= frac.denominator / frac.numerator return int(lowerCamelCase_ ) if __name__ == "__main__": print(solution())
183
from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = 'T5Config' class _a ( UpperCamelCase__ ): _lowercase : Optional[int] = '''mt5''' _lowercase : str = MTaConfig class _a ( UpperCamelCase__ ): _lowercase : Optional[Any] = '''mt5''' _lowercase : Optional[Any] = MTaConfig class _a ( UpperCamelCase__ ): _lowercase : Tuple = '''mt5''' _lowercase : Optional[Any] = MTaConfig
43
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) __magic_name__ = { "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"], "processing_speech_to_text": ["Speech2TextProcessor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ["Speech2TextTokenizer"] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ["Speech2TextFeatureExtractor"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSpeech2TextPreTrainedModel", ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel", ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
232
from datetime import datetime import matplotlib.pyplot as plt import torch def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" for param in module.parameters(): lowercase__ = False def _a ( ): """simple docstring""" lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase__ = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE ) fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE ) fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE ) plt.show() def _a ( ): """simple docstring""" lowercase__ = datetime.now() lowercase__ = current_time.strftime('''%H:%M:%S''' ) return timestamp
43
0
from collections.abc import Callable def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ ) -> int: '''simple docstring''' UpperCamelCase = a UpperCamelCase = b if function(lowercase_ ) == 0: # one of the a or b is a root for the function return a elif function(lowercase_ ) == 0: return b elif ( function(lowercase_ ) * function(lowercase_ ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError("could not find root in given interval." ) else: UpperCamelCase = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(lowercase_ ) == 0: return mid elif function(lowercase_ ) * function(lowercase_ ) < 0: UpperCamelCase = mid else: UpperCamelCase = mid UpperCamelCase = start + (end - start) / 2.0 return mid def __magic_name__ ( lowercase_ ) -> List[Any]: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_0_0_0)) import doctest doctest.testmod()
606
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _a : def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = mask_ratio lowercase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase__ = (image_size // patch_size) ** 2 lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self: List[str] ) -> str: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = TFViTMAEModel(config=UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]: """simple docstring""" lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) # expected sequence length = num_patches lowercase__ = (self.image_size // self.patch_size) ** 2 lowercase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase__ = 1 lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ ) lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) lowercase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs lowercase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): _lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {} _lowercase : Optional[int] = False _lowercase : List[str] = False _lowercase : Optional[int] = False _lowercase : Optional[int] = False def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" lowercase__ = TFViTMAEModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" pass def lowerCamelCase_ ( self: List[Any] ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self: Optional[int] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase_ ( self: Tuple ) -> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Any: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = outputs_dict[0].numpy() lowercase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(UpperCamelCase_: List[Any] ): lowercase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(UpperCamelCase_ ): lowercase__ = v.numpy() else: lowercase__ = np.array(UpperCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = prepare_numpy_arrays(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str: """simple docstring""" np.random.seed(2 ) lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ = tf.constant(UpperCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase__ = tf_noise super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> Dict: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(UpperCamelCase_ ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ ) } lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ = tf.convert_to_tensor(UpperCamelCase_ ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: lowercase__ = main_layer_class(UpperCamelCase_ ) lowercase__ = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) ) lowercase__ = model(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' ) model.save(UpperCamelCase_ ) lowercase__ = tf.keras.models.load_model( UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(UpperCamelCase_ , tf.keras.Model ) lowercase__ = model(UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": lowercase__ = outputs.last_hidden_state.numpy() lowercase__ = 0 else: lowercase__ = outputs.logits.numpy() lowercase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ ) lowercase__ = model_class.from_pretrained(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": lowercase__ = after_outputs['''last_hidden_state'''].numpy() lowercase__ = 0 else: lowercase__ = after_outputs['''logits'''].numpy() lowercase__ = 0 lowercase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase_ , 1E-5 ) def lowerCamelCase_ ( self: Tuple ) -> List[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(UpperCamelCase_ ) lowercase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowercase__ = model_class.from_config(model.config ) lowercase__ = new_model(UpperCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def lowerCamelCase_ ( self: Optional[int] ) -> str: """simple docstring""" pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def lowerCamelCase_ ( self: Any ) -> Dict: """simple docstring""" pass @slow def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(UpperCamelCase_ ) def _a ( ): """simple docstring""" lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _a ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Tuple ) -> Tuple: """simple docstring""" return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def lowerCamelCase_ ( self: int ) -> Optional[int]: """simple docstring""" np.random.seed(2 ) lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase__ = ViTMAEConfig() lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) # verify the logits lowercase__ = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowercase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
43
0
__UpperCAmelCase : str = [0, 2, 4, 6, 8] __UpperCAmelCase : Union[str, Any] = [1, 3, 5, 7, 9] def lowercase_ ( __snake_case : int , __snake_case : str , __snake_case : Dict , __snake_case : Any ) -> List[Any]: '''simple docstring''' if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 snake_case__ :Optional[int] = 0 for digit in range(10 ): snake_case__ :Optional[int] = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , __snake_case , __snake_case ) return result snake_case__ :Union[str, Any] = 0 for digita in range(10 ): snake_case__ :Union[str, Any] = digita if (remainder + digita) % 2 == 0: snake_case__ :List[str] = ODD_DIGITS else: snake_case__ :List[Any] = EVEN_DIGITS for digita in other_parity_digits: snake_case__ :List[Any] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , __snake_case , __snake_case , ) return result def lowercase_ ( __snake_case : int = 9 ) -> List[str]: '''simple docstring''' snake_case__ :Any = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(__snake_case , 0 , [0] * length , __snake_case ) return result if __name__ == "__main__": print(F'''{solution() = }''')
241
def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if (len(SCREAMING_SNAKE_CASE ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class A_ ( UpperCamelCase__ ): _A :Any = (CMStochasticIterativeScheduler,) _A :Optional[int] = 10 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , **snake_case__ : Dict ): lowercase = { """num_train_timesteps""": 2_01, """sigma_min""": 0.002, """sigma_max""": 80.0, } config.update(**UpperCamelCase_ ) return config def SCREAMING_SNAKE_CASE__ ( self : str ): lowercase = 10 lowercase = self.get_scheduler_config() lowercase = self.scheduler_classes[0](**UpperCamelCase_ ) scheduler.set_timesteps(UpperCamelCase_ ) lowercase = scheduler.timesteps[0] lowercase = scheduler.timesteps[1] lowercase = self.dummy_sample lowercase = 0.1 * sample lowercase = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample lowercase = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=UpperCamelCase_ ) def SCREAMING_SNAKE_CASE__ ( self : str ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=UpperCamelCase_ ) def SCREAMING_SNAKE_CASE__ ( self : Any ): lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**UpperCamelCase_ ) lowercase = 1 scheduler.set_timesteps(UpperCamelCase_ ) lowercase = scheduler.timesteps lowercase = torch.manual_seed(0 ) lowercase = self.dummy_model() lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(UpperCamelCase_ ): # 1. scale model input lowercase = scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) # 2. predict noise residual lowercase = model(UpperCamelCase_ , UpperCamelCase_ ) # 3. predict previous sample x_t-1 lowercase = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample lowercase = pred_prev_sample lowercase = torch.sum(torch.abs(UpperCamelCase_ ) ) lowercase = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 192.7_614 ) < 1E-2 assert abs(result_mean.item() - 0.2_510 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**UpperCamelCase_ ) lowercase = [1_06, 0] scheduler.set_timesteps(timesteps=UpperCamelCase_ ) lowercase = scheduler.timesteps lowercase = torch.manual_seed(0 ) lowercase = self.dummy_model() lowercase = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input lowercase = scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) # 2. predict noise residual lowercase = model(UpperCamelCase_ , UpperCamelCase_ ) # 3. predict previous sample x_t-1 lowercase = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample lowercase = pred_prev_sample lowercase = torch.sum(torch.abs(UpperCamelCase_ ) ) lowercase = torch.mean(torch.abs(UpperCamelCase_ ) ) assert abs(result_sum.item() - 347.6_357 ) < 1E-2 assert abs(result_mean.item() - 0.4_527 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**UpperCamelCase_ ) lowercase = [39, 30, 12, 15, 0] with self.assertRaises(UpperCamelCase_ , msg="""`timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=UpperCamelCase_ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**UpperCamelCase_ ) lowercase = [39, 30, 12, 1, 0] lowercase = len(UpperCamelCase_ ) with self.assertRaises(UpperCamelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ): lowercase = self.scheduler_classes[0] lowercase = self.get_scheduler_config() lowercase = scheduler_class(**UpperCamelCase_ ) lowercase = [scheduler.config.num_train_timesteps] with self.assertRaises( UpperCamelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=UpperCamelCase_ )
428
from __future__ import annotations def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ , lowercase__ = position lowercase__ = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] lowercase__ = [] for position in positions: lowercase__ , lowercase__ = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(SCREAMING_SNAKE_CASE ) return permissible_positions def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return not any(elem == 0 for row in board for elem in row ) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if is_complete(SCREAMING_SNAKE_CASE ): return True for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ): lowercase__ , lowercase__ = position if board[y][x] == 0: lowercase__ = curr + 1 if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ): return True lowercase__ = 0 return False def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )] for i in range(SCREAMING_SNAKE_CASE ): for j in range(SCREAMING_SNAKE_CASE ): lowercase__ = 1 if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ): return board lowercase__ = 0 lowercase__ = f'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = { 'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['LlamaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['LlamaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'LlamaForCausalLM', 'LlamaModel', 'LlamaPreTrainedModel', 'LlamaForSequenceClassification', ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
30
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n' @dataclass class _a ( UpperCamelCase__ ): _lowercase : Union[PIL.Image.Image, np.ndarray] class _a ( UpperCamelCase__ ): def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]: """simple docstring""" super().__init__() self.register_modules( prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]: """simple docstring""" if latents is None: lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' ) lowercase__ = latents.to(UpperCamelCase_ ) lowercase__ = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) lowercase__ = torch.device(f'cuda:{gpu_id}' ) lowercase__ = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase_ , UpperCamelCase_ ) @property def lowerCamelCase_ ( self: List[Any] ) -> Dict: """simple docstring""" if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(UpperCamelCase_ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any: """simple docstring""" if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ): lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 ) if not isinstance(UpperCamelCase_ , torch.Tensor ): lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ ) lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state'''] lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) if do_classifier_free_guidance: lowercase__ = torch.zeros_like(UpperCamelCase_ ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase__ = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(UpperCamelCase_ ) def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]: """simple docstring""" if isinstance(UpperCamelCase_ , PIL.Image.Image ): lowercase__ = 1 elif isinstance(UpperCamelCase_ , torch.Tensor ): lowercase__ = image.shape[0] elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): lowercase__ = len(UpperCamelCase_ ) else: raise ValueError( f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' ) lowercase__ = self._execution_device lowercase__ = batch_size * num_images_per_prompt lowercase__ = guidance_scale > 1.0 lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # prior self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ ) lowercase__ = self.scheduler.timesteps lowercase__ = self.prior.config.num_embeddings lowercase__ = self.prior.config.embedding_dim lowercase__ = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the latents if we are doing classifier free guidance lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self.prior( UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding # remove the variance lowercase__ , lowercase__ = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: lowercase__ , lowercase__ = noise_pred.chunk(2 ) lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) lowercase__ = self.scheduler.step( UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=UpperCamelCase_ ) lowercase__ = [] for i, latent in enumerate(UpperCamelCase_ ): print() lowercase__ = self.renderer.decode( latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(UpperCamelCase_ ) lowercase__ = torch.stack(UpperCamelCase_ ) if output_type not in ["np", "pil"]: raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' ) lowercase__ = images.cpu().numpy() if output_type == "pil": lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=UpperCamelCase_ )
43
0
class __magic_name__ : '''simple docstring''' def __init__( self:Tuple , _a:Dict ): snake_case__ = val snake_case__ = None snake_case__ = None def SCREAMING_SNAKE_CASE__ ( self:Any , _a:Any ): if self.val: if val < self.val: if self.left is None: snake_case__ = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: snake_case__ = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: snake_case__ = val def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> Any: if root: inorder(root.left , __lowerCAmelCase ) res.append(root.val ) inorder(root.right , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str: if len(__lowerCAmelCase ) == 0: return arr snake_case__ = Node(arr[0] ) for i in range(1 , len(__lowerCAmelCase ) ): root.insert(arr[i] ) # Traverse BST in order. snake_case__ = [] inorder(__lowerCAmelCase , __lowerCAmelCase ) return res if __name__ == "__main__": print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
33
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): def lowerCamelCase_ ( self: Tuple ) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ ) }
43
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL a : Any = logging.get_logger(__name__) def __magic_name__ ( __UpperCAmelCase ) -> Any: '''simple docstring''' if isinstance(__UpperCAmelCase, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__UpperCAmelCase, (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__UpperCAmelCase ): return [[videos]] raise ValueError(F"Could not make batched video from {videos}" ) class a ( UpperCamelCase__ ): snake_case_ = ['''pixel_values'''] def __init__( self : List[str] , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : List[str] , ): super().__init__(**UpperCamelCase_ ) snake_case_ = size if size is not None else {'''shortest_edge''': 256} snake_case_ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) snake_case_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} snake_case_ = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' ) snake_case_ = do_resize snake_case_ = size snake_case_ = do_center_crop snake_case_ = crop_size snake_case_ = resample snake_case_ = do_rescale snake_case_ = rescale_factor snake_case_ = offset snake_case_ = do_normalize snake_case_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def A_ ( self : List[str] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ): snake_case_ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" in size: snake_case_ = get_resize_output_image_size(UpperCamelCase_ , size['''shortest_edge'''] , default_to_square=UpperCamelCase_ ) elif "height" in size and "width" in size: snake_case_ = (size['''height'''], size['''width''']) else: raise ValueError(F"Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}" ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def A_ ( self : Tuple , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Tuple , ): snake_case_ = get_size_dict(UpperCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F"Size must have \'height\' and \'width\' as keys. Got {size.keys()}" ) return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def A_ ( self : Dict , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : bool = True , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[int] , ): snake_case_ = image.astype(np.floataa ) if offset: snake_case_ = image - (scale / 2) return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def A_ ( self : List[Any] , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ): return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def A_ ( self : Optional[int] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. snake_case_ = to_numpy_array(UpperCamelCase_ ) if do_resize: snake_case_ = self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) if do_center_crop: snake_case_ = self.center_crop(UpperCamelCase_ , size=UpperCamelCase_ ) if do_rescale: snake_case_ = self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , offset=UpperCamelCase_ ) if do_normalize: snake_case_ = self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) snake_case_ = to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) return image def A_ ( self : List[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Tuple , ): snake_case_ = do_resize if do_resize is not None else self.do_resize snake_case_ = resample if resample is not None else self.resample snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ = do_rescale if do_rescale is not None else self.do_rescale snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ = offset if offset is not None else self.offset snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = image_mean if image_mean is not None else self.image_mean snake_case_ = image_std if image_std is not None else self.image_std snake_case_ = size if size is not None else self.size snake_case_ = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) snake_case_ = crop_size if crop_size is not None else self.crop_size snake_case_ = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' ) if not valid_images(UpperCamelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) snake_case_ = make_batched(UpperCamelCase_ ) snake_case_ = [ [ self._preprocess_image( image=UpperCamelCase_ , do_resize=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , do_center_crop=UpperCamelCase_ , crop_size=UpperCamelCase_ , do_rescale=UpperCamelCase_ , rescale_factor=UpperCamelCase_ , offset=UpperCamelCase_ , do_normalize=UpperCamelCase_ , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ , data_format=UpperCamelCase_ , ) for img in video ] for video in videos ] snake_case_ = {'''pixel_values''': videos} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
640
import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[Any] = DownBlockaD # noqa F405 _lowercase : Dict = '''down''' def lowerCamelCase_ ( self: List[str] ) -> Tuple: """simple docstring""" lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405 _lowercase : Tuple = '''down''' def lowerCamelCase_ ( self: List[Any] ) -> str: """simple docstring""" lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = AttnDownBlockaD # noqa F405 _lowercase : List[Any] = '''down''' def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = CrossAttnDownBlockaD # noqa F405 _lowercase : Optional[int] = '''down''' def lowerCamelCase_ ( self: Optional[Any] ) -> Any: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: str ) -> Tuple: """simple docstring""" lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405 _lowercase : str = '''down''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]: """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = SkipDownBlockaD # noqa F405 _lowercase : Tuple = '''down''' @property def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" return super().get_dummy_input(include_skip_sample=UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> List[Any]: """simple docstring""" lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405 _lowercase : Optional[int] = '''down''' @property def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" return super().get_dummy_input(include_skip_sample=UpperCamelCase_ ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : int = DownEncoderBlockaD # noqa F405 _lowercase : List[Any] = '''down''' @property def lowerCamelCase_ ( self: List[str] ) -> str: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: Any ) -> List[Any]: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''out_channels''': 32, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: str ) -> Dict: """simple docstring""" lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405 _lowercase : int = '''down''' @property def lowerCamelCase_ ( self: Dict ) -> Optional[Any]: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: str ) -> List[str]: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''out_channels''': 32, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405 _lowercase : Union[str, Any] = '''mid''' def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''temb_channels''': 128, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405 _lowercase : str = '''mid''' def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405 _lowercase : str = '''mid''' @property def lowerCamelCase_ ( self: int ) -> List[Any]: """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = UpBlockaD # noqa F405 _lowercase : Any = '''up''' @property def lowerCamelCase_ ( self: str ) -> str: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> List[Any]: """simple docstring""" lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405 _lowercase : List[Any] = '''up''' @property def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]: """simple docstring""" lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = CrossAttnUpBlockaD # noqa F405 _lowercase : List[str] = '''up''' @property def lowerCamelCase_ ( self: int ) -> Any: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Dict ) -> Optional[int]: """simple docstring""" lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405 _lowercase : Dict = '''up''' @property def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnUpBlockaD # noqa F405 _lowercase : Optional[Any] = '''up''' @property def lowerCamelCase_ ( self: Tuple ) -> int: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Dict = SkipUpBlockaD # noqa F405 _lowercase : Optional[int] = '''up''' @property def lowerCamelCase_ ( self: Dict ) -> int: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] ) -> Dict: """simple docstring""" lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnSkipUpBlockaD # noqa F405 _lowercase : str = '''up''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> Dict: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Dict = UpDecoderBlockaD # noqa F405 _lowercase : Tuple = '''up''' @property def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ = {'''in_channels''': 32, '''out_channels''': 32} lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405 _lowercase : str = '''up''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = {'''in_channels''': 32, '''out_channels''': 32} lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: int ) -> Optional[Any]: """simple docstring""" lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] super().test_output(UpperCamelCase_ )
43
0
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __A : def __init__( self :List[Any] , __snake_case :Dict , __snake_case :int=13 , __snake_case :str=30 , __snake_case :str=2 , __snake_case :int=3 , __snake_case :Any=True , __snake_case :Optional[int]=True , __snake_case :Optional[Any]=32 , __snake_case :Union[str, Any]=2 , __snake_case :Any=4 , __snake_case :List[Any]=37 , __snake_case :Union[str, Any]="gelu" , __snake_case :Optional[int]=0.1 , __snake_case :List[Any]=0.1 , __snake_case :str=10 , __snake_case :List[Any]=0.02 , __snake_case :Optional[Any]=3 , __snake_case :str=None , ): '''simple docstring''' __magic_name__ : List[Any] =parent __magic_name__ : Optional[Any] =batch_size __magic_name__ : Any =image_size __magic_name__ : int =patch_size __magic_name__ : Optional[int] =num_channels __magic_name__ : Optional[Any] =is_training __magic_name__ : Tuple =use_labels __magic_name__ : Optional[Any] =hidden_size __magic_name__ : int =num_hidden_layers __magic_name__ : Union[str, Any] =num_attention_heads __magic_name__ : List[str] =intermediate_size __magic_name__ : Any =hidden_act __magic_name__ : Dict =hidden_dropout_prob __magic_name__ : Dict =attention_probs_dropout_prob __magic_name__ : Optional[Any] =type_sequence_label_size __magic_name__ : Optional[int] =initializer_range __magic_name__ : List[str] =scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __magic_name__ : Union[str, Any] =(image_size // patch_size) ** 2 __magic_name__ : Dict =num_patches + 1 def A__ ( self :Dict ): '''simple docstring''' __magic_name__ : int =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ : Optional[Any] =None if self.use_labels: __magic_name__ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ : Dict =self.get_config() return config, pixel_values, labels def A__ ( self :Optional[int] ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def A__ ( self :Dict , __snake_case :str , __snake_case :Dict , __snake_case :Optional[Any] ): '''simple docstring''' __magic_name__ : Dict =TFViTModel(config=UpperCamelCase_ ) __magic_name__ : Any =model(UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. __magic_name__ : str =self.image_size // 2 __magic_name__ : Optional[int] =pixel_values[:, :, :image_size, :image_size] __magic_name__ : Optional[Any] =model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ ) __magic_name__ : Any =(image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def A__ ( self :Optional[int] , __snake_case :Dict , __snake_case :Any , __snake_case :int ): '''simple docstring''' __magic_name__ : str =self.type_sequence_label_size __magic_name__ : Union[str, Any] =TFViTForImageClassification(UpperCamelCase_ ) __magic_name__ : str =model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. __magic_name__ : Dict =self.image_size // 2 __magic_name__ : Any =pixel_values[:, :, :image_size, :image_size] __magic_name__ : Dict =model(UpperCamelCase_ , interpolate_pos_encoding=UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __magic_name__ : int =1 __magic_name__ : Union[str, Any] =TFViTForImageClassification(UpperCamelCase_ ) __magic_name__ : Union[str, Any] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __magic_name__ : Dict =model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A__ ( self :Any ): '''simple docstring''' __magic_name__ : Union[str, Any] =self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ : List[str] =config_and_inputs __magic_name__ : Optional[int] ={"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () UpperCamelCase = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False def A__ ( self :Optional[Any] ): '''simple docstring''' __magic_name__ : Dict =TFViTModelTester(self ) __magic_name__ : List[Any] =ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def A__ ( self :Dict ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def A__ ( self :List[str] ): '''simple docstring''' pass @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def A__ ( self :List[Any] ): '''simple docstring''' pass def A__ ( self :int ): '''simple docstring''' __magic_name__ , __magic_name__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : Dict =model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __magic_name__ : Union[str, Any] =model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) ) def A__ ( self :str ): '''simple docstring''' __magic_name__ , __magic_name__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ : Optional[int] =model_class(UpperCamelCase_ ) __magic_name__ : List[str] =inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ : Optional[int] =[*signature.parameters.keys()] __magic_name__ : Union[str, Any] =["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def A__ ( self :List[str] ): '''simple docstring''' __magic_name__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def A__ ( self :Tuple ): '''simple docstring''' __magic_name__ : Optional[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def A__ ( self :List[Any] ): '''simple docstring''' __magic_name__ : int =TFViTModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(UpperCamelCase_ ) def lowerCAmelCase_ ( ): __magic_name__ : str =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __A ( unittest.TestCase ): @cached_property def A__ ( self :List[Any] ): '''simple docstring''' return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None @slow def A__ ( self :Any ): '''simple docstring''' __magic_name__ : Optional[Any] =TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ) __magic_name__ : str =self.default_image_processor __magic_name__ : List[Any] =prepare_img() __magic_name__ : int =image_processor(images=UpperCamelCase_ , return_tensors="""tf""" ) # forward pass __magic_name__ : List[str] =model(**UpperCamelCase_ ) # verify the logits __magic_name__ : List[Any] =tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) __magic_name__ : str =tf.constant([-0.2744, 0.8215, -0.0836] ) tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 )
21
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowercase__ = set() # Replace all the whitespace in our sentence lowercase__ = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(SCREAMING_SNAKE_CASE ) == 26 def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowercase__ = [False] * 26 for char in input_str: if char.islower(): lowercase__ = True elif char.isupper(): lowercase__ = True return all(SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def _a ( ): """simple docstring""" from timeit import timeit lowercase__ = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE ) ) print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE ) ) print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
43
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : str ={ 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] =['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Any =[ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] =[ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : Optional[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
135
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length, 2) , SCREAMING_SNAKE_CASE ) else: lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length) , SCREAMING_SNAKE_CASE ) for i, tensor in enumerate(SCREAMING_SNAKE_CASE ): if padding_side == "right": if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = tensor[:sequence_length] else: lowercase__ = tensor[:sequence_length] else: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = tensor[:sequence_length] else: lowercase__ = tensor[:sequence_length] return out_tensor.tolist() def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = ord(SCREAMING_SNAKE_CASE ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True lowercase__ = unicodedata.category(SCREAMING_SNAKE_CASE ) if cat.startswith('''P''' ): return True return False @dataclass class _a ( UpperCamelCase__ ): _lowercase : PreTrainedTokenizerBase _lowercase : Union[bool, str, PaddingStrategy] = True _lowercase : Optional[int] = None _lowercase : Optional[int] = None _lowercase : int = -100 _lowercase : str = "pt" def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ) -> List[Any]: """simple docstring""" import torch lowercase__ = '''label''' if '''label''' in features[0].keys() else '''labels''' lowercase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None lowercase__ = self.tokenizer.pad( UpperCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch lowercase__ = torch.tensor(batch['''entity_ids'''] ).shape[1] lowercase__ = self.tokenizer.padding_side if padding_side == "right": lowercase__ = [ list(UpperCamelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) for label in labels ] else: lowercase__ = [ [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) + list(UpperCamelCase_ ) for label in labels ] lowercase__ = [feature['''ner_tags'''] for feature in features] lowercase__ = padding_tensor(UpperCamelCase_ , -1 , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = [feature['''original_entity_spans'''] for feature in features] lowercase__ = padding_tensor(UpperCamelCase_ , (-1, -1) , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = {k: torch.tensor(UpperCamelCase_ , dtype=torch.intaa ) for k, v in batch.items()} return batch
43
0
import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = { "vocab_file": "vocab.json", "tokenizer_config_file": "tokenizer_config.json", "merges_file": "merges.txt", } __A = { "vocab_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json" ), }, "tokenizer_config_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json" ), }, "merges_file": { "facebook/s2t-wav2vec2-large-en-de": ( "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt" ), }, } __A = "</w>" __A = "@@ " def lowerCAmelCase_ ( __a ) -> int: """simple docstring""" lowerCamelCase__: List[Any] =set() lowerCamelCase__: Union[str, Any] =word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__: Optional[Any] =char return pairs # Speech2Text2 has no max input length __A = {"facebook/s2t-wav2vec2-large-en-de": 1024} class _SCREAMING_SNAKE_CASE ( UpperCamelCase__ ): '''simple docstring''' lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = ['''input_ids''', '''attention_mask'''] def __init__(self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]="<s>" , UpperCAmelCase_ : Dict="<pad>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Any="<unk>" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : Dict , ) ->Dict: '''simple docstring''' super().__init__( unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCamelCase__: Dict =do_lower_case with open(UpperCamelCase_ , encoding="utf-8") as vocab_handle: lowerCamelCase__: Any =json.load(UpperCamelCase_) lowerCamelCase__: str ={v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""") lowerCamelCase__: Tuple =None lowerCamelCase__: str =None else: with open(UpperCamelCase_ , encoding="utf-8") as merges_handle: lowerCamelCase__: Dict =merges_handle.read().split("\n")[:-1] lowerCamelCase__: int =[tuple(merge.split()[:2]) for merge in merges] lowerCamelCase__: Any =dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_)))) lowerCamelCase__: int ={} @property def SCREAMING_SNAKE_CASE_ (self : str) ->int: '''simple docstring''' return len(self.decoder) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Dict: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder) def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Optional[int]) ->str: '''simple docstring''' lowerCamelCase__: str =tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] lowerCamelCase__: Optional[Any] =get_pairs(UpperCamelCase_) if not pairs: return token while True: lowerCamelCase__: Tuple =min(UpperCamelCase_ , key=lambda UpperCAmelCase_: self.bpe_ranks.get(UpperCamelCase_ , float("inf"))) if bigram not in self.bpe_ranks: break lowerCamelCase__ , lowerCamelCase__: List[Any] =bigram lowerCamelCase__: Optional[int] =[] lowerCamelCase__: Optional[int] =0 while i < len(UpperCamelCase_): try: lowerCamelCase__: List[Any] =word.index(UpperCamelCase_ , UpperCamelCase_) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) lowerCamelCase__: Any =j if word[i] == first and i < len(UpperCamelCase_) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 lowerCamelCase__: Optional[int] =tuple(UpperCamelCase_) lowerCamelCase__: Optional[int] =new_word if len(UpperCamelCase_) == 1: break else: lowerCamelCase__: Tuple =get_pairs(UpperCamelCase_) lowerCamelCase__: Union[str, Any] =" ".join(UpperCamelCase_) if word == "\n " + BPE_TOKEN_MERGES: lowerCamelCase__: Union[str, Any] ="\n" + BPE_TOKEN_MERGES if word.endswith(UpperCamelCase_): lowerCamelCase__: Dict =word.replace(UpperCamelCase_ , "") lowerCamelCase__: int =word.replace(" " , UpperCamelCase_) lowerCamelCase__: Optional[int] =word return word def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : str) ->Optional[int]: '''simple docstring''' if self.bpe_ranks is None: raise ValueError( "This tokenizer was instantiated without a `merges.txt` file, so" " that it can only be used for decoding, not for encoding." "Make sure to provide `merges.txt` file at instantiation to enable " "encoding.") if self.do_lower_case: lowerCamelCase__: Dict =text.lower() lowerCamelCase__: List[Any] =text.split() lowerCamelCase__: List[str] =[] for token in text: if token: split_tokens.extend(list(self.bpe(UpperCamelCase_).split(" "))) return split_tokens def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : str) ->int: '''simple docstring''' return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token)) def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : int) ->str: '''simple docstring''' lowerCamelCase__: List[Any] =self.decoder.get(UpperCamelCase_ , self.unk_token) return result def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : List[str]) ->str: '''simple docstring''' lowerCamelCase__: int =" ".join(UpperCamelCase_) # make sure @@ tokens are concatenated lowerCamelCase__: str ="".join(string.split(UpperCamelCase_)) return string def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None) ->Tuple[str]: '''simple docstring''' if not os.path.isdir(UpperCamelCase_): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""") return lowerCamelCase__: Optional[int] =os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) lowerCamelCase__: Optional[int] =os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]) with open(UpperCamelCase_ , "w" , encoding="utf-8") as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_) + "\n") lowerCamelCase__: List[Any] =0 if self.bpe_ranks is None: return (vocab_file,) with open(UpperCamelCase_ , "w" , encoding="utf-8") as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_: kv[1]): if index != token_index: logger.warning( F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!") lowerCamelCase__: Tuple =token_index writer.write(" ".join(UpperCamelCase_) + "\n") index += 1 return (vocab_file, merges_file)
59
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _a ( UpperCamelCase__ ): def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]: """simple docstring""" super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) lowercase__ = eval_examples lowercase__ = post_process_function def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]: """simple docstring""" lowercase__ = gen_kwargs.copy() lowercase__ = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length ) lowercase__ = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams ) lowercase__ = gen_kwargs lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset lowercase__ = self.get_eval_dataloader(UpperCamelCase_ ) lowercase__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowercase__ = self.compute_metrics lowercase__ = None lowercase__ = time.time() lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowercase__ = eval_loop( UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , ) finally: lowercase__ = compute_metrics lowercase__ = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): lowercase__ = metrics.pop(UpperCamelCase_ ) metrics.update(output.metrics ) else: lowercase__ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCamelCase_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ ) return metrics def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = gen_kwargs.copy() lowercase__ = self.get_test_dataloader(UpperCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. lowercase__ = self.compute_metrics lowercase__ = None lowercase__ = time.time() lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowercase__ = eval_loop( UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , ) finally: lowercase__ = compute_metrics lowercase__ = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' ) lowercase__ = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): lowercase__ = metrics.pop(UpperCamelCase_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
43
0
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" lowercase__ = '''new-model''' if is_tf_available(): class _UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" lowercase__ = NewModelConfig @require_tf class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def lowercase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase__ = '''bert-base-cased''' lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) lowercase__ = TFAutoModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) @slow def lowercase__ ( self : Tuple ): '''simple docstring''' lowercase__ = '''bert-base-cased''' lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) lowercase__ = TFAutoModelForPreTraining.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) @slow def lowercase__ ( self : List[str] ): '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) lowercase__ = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase_ ) lowercase__ , lowercase__ = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase_, output_loading_info=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) @slow def lowercase__ ( self : List[str] ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) lowercase__ = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) @slow def lowercase__ ( self : List[Any] ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) lowercase__ = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase_ ) lowercase__ , lowercase__ = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase_, output_loading_info=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) @slow def lowercase__ ( self : Optional[Any] ): '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ ) lowercase__ , lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_, output_loading_info=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) @slow def lowercase__ ( self : Any ): '''simple docstring''' for model_name in ["bert-base-uncased"]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) lowercase__ = TFAutoModelForSequenceClassification.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) @slow def lowercase__ ( self : List[str] ): '''simple docstring''' for model_name in ["bert-base-uncased"]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) lowercase__ = TFAutoModelForQuestionAnswering.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) @slow @require_tensorflow_probability def lowercase__ ( self : Optional[int] ): '''simple docstring''' for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: lowercase__ = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) lowercase__ = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCamelCase_ ) lowercase__ , lowercase__ = TFAutoModelForTableQuestionAnswering.from_pretrained( UpperCamelCase_, output_loading_info=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) def lowercase__ ( self : Tuple ): '''simple docstring''' lowercase__ = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) self.assertEqual(model.num_parameters(), 14_410 ) self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ), 14_410 ) def lowercase__ ( self : str ): '''simple docstring''' lowercase__ = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) self.assertEqual(model.num_parameters(), 14_410 ) self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ), 14_410 ) def lowercase__ ( self : Union[str, Any] ): '''simple docstring''' lowercase__ = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) lowercase__ = copy.deepcopy(model.config ) lowercase__ = ['''FunnelBaseModel'''] lowercase__ = TFAutoModel.from_config(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCamelCase_ ) lowercase__ = TFAutoModel.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) def lowercase__ ( self : Optional[Any] ): '''simple docstring''' try: AutoConfig.register('''new-model''', UpperCamelCase_ ) lowercase__ = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(UpperCamelCase_ ): auto_class.register(UpperCamelCase_, UpperCamelCase_ ) auto_class.register(UpperCamelCase_, UpperCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): auto_class.register(UpperCamelCase_, UpperCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API lowercase__ = BertModelTester(self ).get_config() lowercase__ = NewModelConfig(**tiny_config.to_dict() ) lowercase__ = auto_class.from_config(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCamelCase_ ) lowercase__ = auto_class.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_, UpperCamelCase_ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def lowercase__ ( self : Optional[Any] ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase_, '''bert-base is not a local folder and is not a valid model identifier''' ): lowercase__ = TFAutoModel.from_pretrained('''bert-base''' ) def lowercase__ ( self : List[Any] ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase_, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowercase__ = TFAutoModel.from_pretrained(UpperCamelCase_, revision='''aaaaaa''' ) def lowercase__ ( self : Any ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase_, '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''', ): lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def lowercase__ ( self : List[str] ): '''simple docstring''' with self.assertRaisesRegex(UpperCamelCase_, '''Use `from_pt=True` to load this model''' ): lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def lowercase__ ( self : str ): '''simple docstring''' lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: lowercase__ = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count, 0 ) self.assertEqual(counter.head_request_count, 1 ) self.assertEqual(counter.other_request_count, 0 ) # With a sharded checkpoint lowercase__ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: lowercase__ = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count, 0 ) self.assertEqual(counter.head_request_count, 1 ) self.assertEqual(counter.other_request_count, 0 )
183
import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase__ = json.loads(open(SCREAMING_SNAKE_CASE ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('''.pt''' ): lowercase__ = args.output + '''.pt''' lowercase__ = OrderedDict() with tf.device('''/CPU:0''' ): lowercase__ = tf.train.load_checkpoint(args.tf_model_dir ) lowercase__ = reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase__ = reader.get_tensor(SCREAMING_SNAKE_CASE ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase__ = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase__ = 8 lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/moe''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase__ = key_name[-9:-7] for i in range(16 ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase__ = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/mlp''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p1/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/ln''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/att''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase__ = state[:, 0, :, :] lowercase__ = state[:, 1, :, :] lowercase__ = state[:, 2, :, :] lowercase__ = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/o/kernel''' ): lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase__ = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/an''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase__ = '''model.%s.weight''' % nlayer lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) if key_name.startswith('''model/wte''' ): lowercase__ = '''lm_head.weight''' lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/wob''' ): lowercase__ = '''final_logits_bias''' lowercase__ = vnp.copy() # same in embedded lowercase__ = state.reshape((1, -1) ) lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense/kernel": lowercase__ = '''model.last_project.weight''' lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense_1/bias": lowercase__ = '''model.last_project.bias''' lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) torch.save(SCREAMING_SNAKE_CASE , args.output ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser( description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model') parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model') lowerCAmelCase = parser.parse_args() convert_tf_gptsan_to_pt(args)
43
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor __magic_name__ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( UpperCamelCase__ ): """simple docstring""" def __init__( self : Dict , *_snake_case : Optional[Any] , **_snake_case : Union[str, Any] ) -> None: '''simple docstring''' warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
232
from __future__ import annotations def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) UpperCamelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) UpperCamelCase = "xvjiarui/stable-diffusion-2-inpainting" UpperCamelCase , UpperCamelCase = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ ) UpperCamelCase = "Face of a yellow cat, high resolution, sitting on a park bench" UpperCamelCase = jax.random.PRNGKey(0 ) UpperCamelCase = 50 UpperCamelCase = jax.device_count() UpperCamelCase = num_samples * [prompt] UpperCamelCase = num_samples * [init_image] UpperCamelCase = num_samples * [mask_image] UpperCamelCase , UpperCamelCase , UpperCamelCase = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # shard inputs and rng UpperCamelCase = replicate(UpperCamelCase_ ) UpperCamelCase = jax.random.split(UpperCamelCase_ , jax.device_count() ) UpperCamelCase = shard(UpperCamelCase_ ) UpperCamelCase = shard(UpperCamelCase_ ) UpperCamelCase = shard(UpperCamelCase_ ) UpperCamelCase = pipeline( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ) UpperCamelCase = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 ) UpperCamelCase = images[0, 253:256, 253:256, -1] UpperCamelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) ) UpperCamelCase = jnp.array( [0.3_611_307, 0.37_649_736, 0.3_757_408, 0.38_213_953, 0.39_295_167, 0.3_841_631, 0.41_554_978, 0.4_137_475, 0.4_217_084] ) print(f'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
606
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
43
0
from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase : Tuple = logging.get_logger(__name__) __UpperCAmelCase : List[Any] = { "microsoft/swinv2-tiny-patch4-window8-256": ( "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" ), } class _snake_case ( UpperCamelCase__ ): _A = '''swinv2''' _A = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self ,UpperCamelCase=224 ,UpperCamelCase=4 ,UpperCamelCase=3 ,UpperCamelCase=96 ,UpperCamelCase=[2, 2, 6, 2] ,UpperCamelCase=[3, 6, 12, 24] ,UpperCamelCase=7 ,UpperCamelCase=4.0 ,UpperCamelCase=True ,UpperCamelCase=0.0 ,UpperCamelCase=0.0 ,UpperCamelCase=0.1 ,UpperCamelCase="gelu" ,UpperCamelCase=False ,UpperCamelCase=0.02 ,UpperCamelCase=1E-5 ,UpperCamelCase=32 ,**UpperCamelCase ,) -> List[Any]: super().__init__(**UpperCamelCase_ ) snake_case__ :Any = image_size snake_case__ :Optional[Any] = patch_size snake_case__ :Optional[int] = num_channels snake_case__ :List[Any] = embed_dim snake_case__ :str = depths snake_case__ :Optional[int] = len(UpperCamelCase_ ) snake_case__ :List[str] = num_heads snake_case__ :Optional[int] = window_size snake_case__ :Optional[int] = mlp_ratio snake_case__ :Optional[Any] = qkv_bias snake_case__ :Union[str, Any] = hidden_dropout_prob snake_case__ :str = attention_probs_dropout_prob snake_case__ :Union[str, Any] = drop_path_rate snake_case__ :List[str] = hidden_act snake_case__ :Tuple = use_absolute_embeddings snake_case__ :Any = layer_norm_eps snake_case__ :List[str] = initializer_range snake_case__ :List[Any] = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case__ :int = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) ) snake_case__ :str = (0, 0, 0, 0)
241
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() def lowerCamelCase_ ( self: Dict ) -> Tuple: """simple docstring""" lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting''' lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ ) lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench''' lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = num_samples * [init_image] lowercase__ = num_samples * [mask_image] lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # shard inputs and rng lowercase__ = replicate(UpperCamelCase_ ) lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = pipeline( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ) lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 ) lowercase__ = images[0, 253:256, 253:256, -1] lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
43
0
def UpperCamelCase__ ( lowerCAmelCase__ = 600_851_475_143 ): try: lowercase = int(lowerCAmelCase__ ) except (TypeError, ValueError): raise TypeError("""Parameter n must be int or castable to int.""" ) if n <= 0: raise ValueError("""Parameter n must be greater than or equal to one.""" ) lowercase = 1 lowercase = 2 while i * i <= n: while n % i == 0: lowercase = i n //= i i += 1 if n > 1: lowercase = n return int(lowerCAmelCase__ ) if __name__ == "__main__": print(f'''{solution() = }''')
428
from __future__ import annotations import math def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) return min( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) def _a ( ): """simple docstring""" lowercase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] lowercase__ = math.log(len(SCREAMING_SNAKE_CASE ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
43
0
import math def lowerCamelCase__ ( _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : List[Any] = 2 UpperCAmelCase_ : Any = int(math.sqrt(_lowercase ) ) # Size of every segment UpperCAmelCase_ : str = [True] * (end + 1) UpperCAmelCase_ : List[Any] = [] while start <= end: if temp[start] is True: in_prime.append(_lowercase ) for i in range(start * start , end + 1 , _lowercase ): UpperCAmelCase_ : str = False start += 1 prime += in_prime UpperCAmelCase_ : str = end + 1 UpperCAmelCase_ : Dict = min(2 * end , _lowercase ) while low <= n: UpperCAmelCase_ : Union[str, Any] = [True] * (high - low + 1) for each in in_prime: UpperCAmelCase_ : str = math.floor(low / each ) * each if t < low: t += each for j in range(_lowercase , high + 1 , _lowercase ): UpperCAmelCase_ : int = False for j in range(len(_lowercase ) ): if temp[j] is True: prime.append(j + low ) UpperCAmelCase_ : Optional[int] = high + 1 UpperCAmelCase_ : List[Any] = min(high + end , _lowercase ) return prime print(sieve(10**6))
30
class _a : def __init__( self: Tuple , UpperCamelCase_: Dict ) -> List[str]: """simple docstring""" lowercase__ = val lowercase__ = None lowercase__ = None def lowerCamelCase_ ( self: Any , UpperCamelCase_: Any ) -> Union[str, Any]: """simple docstring""" if self.val: if val < self.val: if self.left is None: lowercase__ = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: lowercase__ = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: lowercase__ = val def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if root: inorder(root.left , SCREAMING_SNAKE_CASE ) res.append(root.val ) inorder(root.right , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if len(SCREAMING_SNAKE_CASE ) == 0: return arr lowercase__ = Node(arr[0] ) for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): root.insert(arr[i] ) # Traverse BST in order. lowercase__ = [] inorder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
43
0
import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": lowerCamelCase__ : Dict = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument( """--original_config_file""", type=str, required=True, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--image_size""", default=5_1_2, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F"""could not parse string as bool {string}""" ) parser.add_argument( """--use_linear_projection""", help="""Override for use linear projection""", required=False, type=parse_bool ) parser.add_argument("""--cross_attention_dim""", help="""Override for cross attention_dim""", required=False, type=int) lowerCamelCase__ : Tuple = parser.parse_args() lowerCamelCase__ : Optional[int] = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
33
lowerCAmelCase = { 'a': 'AAAAA', 'b': 'AAAAB', 'c': 'AAABA', 'd': 'AAABB', 'e': 'AABAA', 'f': 'AABAB', 'g': 'AABBA', 'h': 'AABBB', 'i': 'ABAAA', 'j': 'BBBAA', 'k': 'ABAAB', 'l': 'ABABA', 'm': 'ABABB', 'n': 'ABBAA', 'o': 'ABBAB', 'p': 'ABBBA', 'q': 'ABBBB', 'r': 'BAAAA', 's': 'BAAAB', 't': 'BAABA', 'u': 'BAABB', 'v': 'BBBAB', 'w': 'BABAA', 'x': 'BABAB', 'y': 'BABBA', 'z': 'BABBB', ' ': ' ', } lowerCAmelCase = {value: key for key, value in encode_dict.items()} def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = '''''' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('''encode() accepts only letters of the alphabet and spaces''' ) return encoded def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if set(SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' ) lowercase__ = '''''' for word in coded.split(): while len(SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase__ = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
43
0
'''simple docstring''' import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class a ( UpperCamelCase__ , unittest.TestCase ): snake_case_ = BioGptTokenizer snake_case_ = False def A_ ( self : Dict ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] snake_case_ = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) snake_case_ = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(UpperCamelCase_ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(UpperCamelCase_ ) ) def A_ ( self : Dict , lowercase_ : Dict ): snake_case_ = '''lower newer''' snake_case_ = '''lower newer''' return input_text, output_text def A_ ( self : Dict ): snake_case_ = BioGptTokenizer(self.vocab_file , self.merges_file ) snake_case_ = '''lower''' snake_case_ = ['''low''', '''er</w>'''] snake_case_ = tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) snake_case_ = tokens + ['''<unk>'''] snake_case_ = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ ) @slow def A_ ( self : List[Any] ): snake_case_ = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) snake_case_ = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ ) snake_case_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ ) snake_case_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ ) snake_case_ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
640
import numpy as np def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
43
0
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ): return int((input_a, input_a).count(0 ) != 0 ) def lowerCAmelCase_ ( ): assert nand_gate(0 , 0 ) == 1 assert nand_gate(0 , 1 ) == 1 assert nand_gate(1 , 0 ) == 1 assert nand_gate(1 , 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
21
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = '▁' lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} lowerCAmelCase = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } lowerCAmelCase = {'vinai/bartpho-syllable': 1024} class _a ( UpperCamelCase__ ): _lowercase : Tuple = VOCAB_FILES_NAMES _lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP _lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Any = ['''input_ids''', '''attention_mask'''] def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None: """simple docstring""" lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) lowercase__ = vocab_file lowercase__ = monolingual_vocab_file lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility lowercase__ = {} lowercase__ = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids: lowercase__ = cnt cnt += 1 with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): lowercase__ = line.strip().split()[0] lowercase__ = len(self.fairseq_tokens_to_ids ) if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids: lowercase__ = len(self.fairseq_tokens_to_ids ) lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self: Tuple ) -> int: """simple docstring""" lowercase__ = self.__dict__.copy() lowercase__ = None lowercase__ = self.sp_model.serialized_model_proto() return state def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]: """simple docstring""" lowercase__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__ = {} lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]: """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.fairseq_ids_to_tokens[index] def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict: """simple docstring""" lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: lowercase__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'{str(UpperCamelCase_ )} \n' ) return out_vocab_file, out_monolingual_vocab_file
43
0
'''simple docstring''' def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] ): '''simple docstring''' return 1 if input_a == input_a else 0 def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
135
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = original_name.split('''.''' )[0] lowercase__ = key.split('''.''' ) lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] ) lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] ) lowercase__ = orig_block_num - offset lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' ) return key def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = OrderedDict() lowercase__ , lowercase__ = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): lowercase__ = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 lowercase__ = key[: key.find('''proj''' )] lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' ) lowercase__ = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: lowercase__ = '''poolformer.encoder.''' + key if "mlp.fc1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' ) if "norm2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: lowercase__ = key.replace('''head''' , '''classifier''' ) lowercase__ = value return new_state_dict def _a ( ): """simple docstring""" lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return image @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = PoolFormerConfig() # set attributes based on model_name lowercase__ = '''huggingface/label-files''' lowercase__ = model_name[-3:] lowercase__ = 10_00 lowercase__ = '''imagenet-1k-id2label.json''' lowercase__ = (1, 10_00) # set config attributes lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} if size == "s12": lowercase__ = [2, 2, 6, 2] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 0.9 elif size == "s24": lowercase__ = [4, 4, 12, 4] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 0.9 elif size == "s36": lowercase__ = [6, 6, 18, 6] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.9 elif size == "m36": lowercase__ = [6, 6, 18, 6] lowercase__ = [96, 1_92, 3_84, 7_68] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.95 elif size == "m48": lowercase__ = [8, 8, 24, 8] lowercase__ = [96, 1_92, 3_84, 7_68] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.95 else: raise ValueError(f'Size {size} not supported' ) # load image processor lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE ) # Prepare image lowercase__ = prepare_img() lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values logger.info(f'Converting model {model_name}...' ) # load original state dict lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) ) # rename keys lowercase__ = rename_keys(SCREAMING_SNAKE_CASE ) # create HuggingFace model and load state dict lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE ) model.load_state_dict(SCREAMING_SNAKE_CASE ) model.eval() # Define image processor lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE ) lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass lowercase__ = model(SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits # define expected logit slices for different models if size == "s12": lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(f'Size {size} not supported' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 ) # finally, save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) lowerCAmelCase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
43
0
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __A = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( enum.Enum ): '''simple docstring''' lowercase_ = 0 lowercase_ = 1 @add_end_docstrings(UpperCamelCase__ ) class _SCREAMING_SNAKE_CASE ( UpperCamelCase__ ): '''simple docstring''' lowercase_ = '''generated''' def __init__(self : int , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Union[str, Any]) ->Tuple: '''simple docstring''' super().__init__(*UpperCamelCase_ , **UpperCamelCase_) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Any=None , **UpperCAmelCase_ : Optional[int] , ) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: List[Any] ={} if truncation is not None: lowerCamelCase__: List[str] =truncation lowerCamelCase__: Union[str, Any] =generate_kwargs lowerCamelCase__: int ={} if return_tensors is not None and return_type is None: lowerCamelCase__: Tuple =ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: lowerCamelCase__: Dict =return_type if clean_up_tokenization_spaces is not None: lowerCamelCase__: int =clean_up_tokenization_spaces if stop_sequence is not None: lowerCamelCase__: List[Any] =self.tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_) if len(UpperCamelCase_) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim.") lowerCamelCase__: Tuple =stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int) ->Optional[int]: '''simple docstring''' return True def SCREAMING_SNAKE_CASE_ (self : List[Any] , *UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any) ->Tuple: '''simple docstring''' lowerCamelCase__: Tuple =self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0] , UpperCamelCase_): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input") lowerCamelCase__: int =([prefix + arg for arg in args[0]],) lowerCamelCase__: Any =True elif isinstance(args[0] , UpperCamelCase_): lowerCamelCase__: Optional[Any] =(prefix + args[0],) lowerCamelCase__: Dict =False else: raise ValueError( F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""") lowerCamelCase__: int =self.tokenizer(*UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors=self.framework) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__(self : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[Any]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Optional[Any] =super().__call__(*UpperCamelCase_ , **UpperCamelCase_) if ( isinstance(args[0] , UpperCamelCase_) and all(isinstance(UpperCamelCase_ , UpperCamelCase_) for el in args[0]) and all(len(UpperCamelCase_) == 1 for res in result) ): return [res[0] for res in result] return result def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any=TruncationStrategy.DO_NOT_TRUNCATE , **UpperCAmelCase_ : Dict) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =self._parse_and_tokenize(UpperCamelCase_ , truncation=UpperCamelCase_ , **UpperCamelCase_) return inputs def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int) ->List[Any]: '''simple docstring''' if self.framework == "pt": lowerCamelCase__ , lowerCamelCase__: Tuple =model_inputs["input_ids"].shape elif self.framework == "tf": lowerCamelCase__ , lowerCamelCase__: List[Any] =tf.shape(model_inputs["input_ids"]).numpy() lowerCamelCase__: Dict =generate_kwargs.get("min_length" , self.model.config.min_length) lowerCamelCase__: Dict =generate_kwargs.get("max_length" , self.model.config.max_length) self.check_inputs(UpperCamelCase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"]) lowerCamelCase__: int =self.model.generate(**UpperCamelCase_ , **UpperCamelCase_) lowerCamelCase__: str =output_ids.shape[0] if self.framework == "pt": lowerCamelCase__: Tuple =output_ids.reshape(UpperCamelCase_ , out_b // in_b , *output_ids.shape[1:]) elif self.framework == "tf": lowerCamelCase__: Any =tf.reshape(UpperCamelCase_ , (in_b, out_b // in_b, *output_ids.shape[1:])) return {"output_ids": output_ids} def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int=ReturnType.TEXT , UpperCAmelCase_ : Union[str, Any]=False) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Any =[] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: lowerCamelCase__: str ={F"""{self.return_name}_token_ids""": output_ids} elif return_type == ReturnType.TEXT: lowerCamelCase__: Any ={ F"""{self.return_name}_text""": self.tokenizer.decode( UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , ) } records.append(UpperCamelCase_) return records @add_end_docstrings(UpperCamelCase__ ) class _SCREAMING_SNAKE_CASE ( UpperCamelCase__ ): '''simple docstring''' lowercase_ = '''summary''' def __call__(self : Any , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : str) ->Tuple: '''simple docstring''' return super().__call__(*UpperCamelCase_ , **UpperCamelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int) ->bool: '''simple docstring''' if max_length < min_length: logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""") if input_length < max_length: logger.warning( F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """ "a summarization task, where outputs shorter than the input are typically wanted, you might " F"""consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})""") @add_end_docstrings(UpperCamelCase__ ) class _SCREAMING_SNAKE_CASE ( UpperCamelCase__ ): '''simple docstring''' lowercase_ = '''translation''' def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int) ->Dict: '''simple docstring''' if input_length > 0.9 * max_length: logger.warning( F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """ "increasing your max_length manually, e.g. translator(\'...\', max_length=400)") return True def SCREAMING_SNAKE_CASE_ (self : List[Any] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int=TruncationStrategy.DO_NOT_TRUNCATE , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[str]=None) ->Any: '''simple docstring''' if getattr(self.tokenizer , "_build_translation_inputs" , UpperCamelCase_): return self.tokenizer._build_translation_inputs( *UpperCamelCase_ , return_tensors=self.framework , truncation=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_) else: return super()._parse_and_tokenize(*UpperCamelCase_ , truncation=UpperCamelCase_) def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : Dict) ->Dict: '''simple docstring''' lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: List[str] =super()._sanitize_parameters(**UpperCamelCase_) if src_lang is not None: lowerCamelCase__: int =src_lang if tgt_lang is not None: lowerCamelCase__: Optional[int] =tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. lowerCamelCase__: Tuple =kwargs.get("task" , self.task) lowerCamelCase__: str =task.split("_") if task and len(UpperCamelCase_) == 4: # translation, XX, to YY lowerCamelCase__: Union[str, Any] =items[1] lowerCamelCase__: Dict =items[3] return preprocess_params, forward_params, postprocess_params def __call__(self : Optional[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict) ->Tuple: '''simple docstring''' return super().__call__(*UpperCamelCase_ , **UpperCamelCase_)
59
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowerCAmelCase = logging.getLogger() def _a ( ): """simple docstring""" lowercase__ = argparse.ArgumentParser() parser.add_argument('''-f''' ) lowercase__ = parser.parse_args() return args.f def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = {} lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' ) if os.path.exists(SCREAMING_SNAKE_CASE ): with open(SCREAMING_SNAKE_CASE , '''r''' ) as f: lowercase__ = json.load(SCREAMING_SNAKE_CASE ) else: raise ValueError(f'can\'t find {path}' ) return results def _a ( ): """simple docstring""" lowercase__ = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _a ( UpperCamelCase__ ): @classmethod def lowerCamelCase_ ( cls: int ) -> Any: """simple docstring""" lowercase__ = tempfile.mkdtemp() lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict: """simple docstring""" shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Dict ) -> Optional[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = 7 if get_gpu_count() > 1 else 2 lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) ) @slow def lowerCamelCase_ ( self: Optional[int] ) -> Dict: """simple docstring""" lowercase__ = logging.StreamHandler(sys.stdout ) logger.addHandler(UpperCamelCase_ ) lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
43
0
def a ( lowerCamelCase_ ): '''simple docstring''' return "".join([hex(lowerCamelCase_ )[2:].zfill(2 ).upper() for byte in list(lowerCamelCase_ )] ) def a ( lowerCamelCase_ ): '''simple docstring''' if (len(lowerCamelCase_ ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(lowerCamelCase_ ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(lowerCamelCase_ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
183
from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = 'T5Config' class _a ( UpperCamelCase__ ): _lowercase : Optional[int] = '''mt5''' _lowercase : str = MTaConfig class _a ( UpperCamelCase__ ): _lowercase : Optional[Any] = '''mt5''' _lowercase : Optional[Any] = MTaConfig class _a ( UpperCamelCase__ ): _lowercase : Tuple = '''mt5''' _lowercase : Optional[Any] = MTaConfig
43
0
"""simple docstring""" def _lowerCamelCase ( UpperCAmelCase__ ) -> Union[str, Any]: '''simple docstring''' if not isinstance(UpperCAmelCase__,UpperCAmelCase__ ): raise ValueError('Input series is not valid, valid series - [2, 4, 6]' ) if len(UpperCAmelCase__ ) == 0: raise ValueError('Input list must be a non empty list' ) if len(UpperCAmelCase__ ) == 1: return True a__ = series[1] - series[0] for index in range(len(UpperCAmelCase__ ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def _lowerCamelCase ( UpperCAmelCase__ ) -> int: '''simple docstring''' if not isinstance(UpperCAmelCase__,UpperCAmelCase__ ): raise ValueError('Input series is not valid, valid series - [2, 4, 6]' ) if len(UpperCAmelCase__ ) == 0: raise ValueError('Input list must be a non empty list' ) a__ = 0 for val in series: answer += val return answer / len(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
232
from datetime import datetime import matplotlib.pyplot as plt import torch def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" for param in module.parameters(): lowercase__ = False def _a ( ): """simple docstring""" lowercase__ = '''cuda''' if torch.cuda.is_available() else '''cpu''' if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase__ = '''mps''' if device == "mps": print( '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch''' ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues''' ''' with generations.''' ) return device def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = plt.imshow(SCREAMING_SNAKE_CASE ) fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE ) fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE ) plt.show() def _a ( ): """simple docstring""" lowercase__ = datetime.now() lowercase__ = current_time.strftime('''%H:%M:%S''' ) return timestamp
43
0
import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __a : str = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=18 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=400 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> Tuple: """simple docstring""" UpperCamelCase = size if size is not None else {"height": 20, "width": 20} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = size UpperCamelCase = do_normalize UpperCamelCase = do_convert_rgb UpperCamelCase = [512, 1024, 2048, 4096] UpperCamelCase = patch_size if patch_size is not None else {"height": 16, "width": 16} def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class __UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" lowercase = PixaStructImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ) -> int: """simple docstring""" UpperCamelCase = PixaStructImageProcessingTester(self ) @property def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCamelCase_ , "do_convert_rgb" ) ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = self.image_processor_tester.prepare_dummy_image() UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) UpperCamelCase = 2048 UpperCamelCase = image_processor(UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1e-3 , rtol=1e-3 ) ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCamelCase = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCamelCase = image_processor( UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 UpperCamelCase = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(UpperCamelCase_ ): UpperCamelCase = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches UpperCamelCase = "Hello" UpperCamelCase = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCamelCase = image_processor( UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) UpperCamelCase = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCamelCase = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCamelCase = image_processor( UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCamelCase = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCamelCase = image_processor( UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , ) @require_torch @require_vision class __UpperCAmelCase ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" lowercase = PixaStructImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCamelCase = 3 @property def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize" ) ) self.assertTrue(hasattr(UpperCamelCase_ , "do_convert_rgb" ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCamelCase = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCamelCase = image_processor( UpperCamelCase_ , return_tensors="pt" , max_patches=UpperCamelCase_ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
606
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _a : def __init__( self: Tuple , UpperCamelCase_: int , UpperCamelCase_: Optional[Any]=13 , UpperCamelCase_: Any=30 , UpperCamelCase_: Union[str, Any]=2 , UpperCamelCase_: Tuple=3 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[Any]=32 , UpperCamelCase_: int=2 , UpperCamelCase_: List[str]=4 , UpperCamelCase_: Optional[int]=37 , UpperCamelCase_: int="gelu" , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Any=0.1 , UpperCamelCase_: Optional[int]=10 , UpperCamelCase_: List[str]=0.02 , UpperCamelCase_: List[Any]=3 , UpperCamelCase_: Any=0.6 , UpperCamelCase_: Any=None , ) -> str: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = mask_ratio lowercase__ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase__ = (image_size // patch_size) ** 2 lowercase__ = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self: List[str] ) -> str: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , UpperCamelCase_: List[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = TFViTMAEModel(config=UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: Any ) -> Union[str, Any]: """simple docstring""" lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) # expected sequence length = num_patches lowercase__ = (self.image_size // self.patch_size) ** 2 lowercase__ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase__ = 1 lowercase__ = TFViTMAEForPreTraining(UpperCamelCase_ ) lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(UpperCamelCase_ , training=UpperCamelCase_ ) lowercase__ = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs lowercase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): _lowercase : int = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () _lowercase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {} _lowercase : Optional[int] = False _lowercase : List[str] = False _lowercase : Optional[int] = False _lowercase : Optional[int] = False def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" lowercase__ = TFViTMAEModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMAE does not use inputs_embeds''' ) def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" pass def lowerCamelCase_ ( self: List[Any] ) -> List[Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self: Optional[int] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase_ ( self: Tuple ) -> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Any: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = copy.deepcopy(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = outputs_dict[0].numpy() lowercase__ = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(UpperCamelCase_: List[Any] ): lowercase__ = {} for k, v in inputs_dict.items(): if tf.is_tensor(UpperCamelCase_ ): lowercase__ = v.numpy() else: lowercase__ = np.array(UpperCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = prepare_numpy_arrays(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: int , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[Any] , UpperCamelCase_: Tuple ) -> str: """simple docstring""" np.random.seed(2 ) lowercase__ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ = tf.constant(UpperCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase__ = tf_noise super().check_pt_tf_models(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> Dict: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(UpperCamelCase_ ) if module_member_name.endswith('''MainLayer''' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )] for module_member in (getattr(UpperCamelCase_ , UpperCamelCase_ ),) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(UpperCamelCase_ , '''_keras_serializable''' , UpperCamelCase_ ) } lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase__ = tf.convert_to_tensor(UpperCamelCase_ ) inputs_dict.update({'''noise''': noise} ) for main_layer_class in tf_main_layer_classes: lowercase__ = main_layer_class(UpperCamelCase_ ) lowercase__ = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowercase__ = tf.keras.Model(UpperCamelCase_ , outputs=main_layer(UpperCamelCase_ ) ) lowercase__ = model(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ = os.path.join(UpperCamelCase_ , '''keras_model.h5''' ) model.save(UpperCamelCase_ ) lowercase__ = tf.keras.models.load_model( UpperCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(UpperCamelCase_ , tf.keras.Model ) lowercase__ = model(UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": lowercase__ = outputs.last_hidden_state.numpy() lowercase__ = 0 else: lowercase__ = outputs.logits.numpy() lowercase__ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase_ , saved_model=UpperCamelCase_ ) lowercase__ = model_class.from_pretrained(UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": lowercase__ = after_outputs['''last_hidden_state'''].numpy() lowercase__ = 0 else: lowercase__ = after_outputs['''logits'''].numpy() lowercase__ = 0 lowercase__ = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase_ , 1E-5 ) def lowerCamelCase_ ( self: Tuple ) -> List[Any]: """simple docstring""" np.random.seed(2 ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ = int((config.image_size // config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase_ ) lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = model(UpperCamelCase_ , noise=UpperCamelCase_ ) lowercase__ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(UpperCamelCase_ ) lowercase__ = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowercase__ = model_class.from_config(model.config ) lowercase__ = new_model(UpperCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) lowercase__ = new_model(UpperCamelCase_ , noise=UpperCamelCase_ ) self.assert_outputs_same(UpperCamelCase_ , UpperCamelCase_ ) @unittest.skip( reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.''' ) def lowerCamelCase_ ( self: Optional[int] ) -> str: """simple docstring""" pass @unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' ) def lowerCamelCase_ ( self: Any ) -> Dict: """simple docstring""" pass @slow def lowerCamelCase_ ( self: List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(UpperCamelCase_ ) def _a ( ): """simple docstring""" lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _a ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self: Tuple ) -> Tuple: """simple docstring""" return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None @slow def lowerCamelCase_ ( self: int ) -> Optional[int]: """simple docstring""" np.random.seed(2 ) lowercase__ = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''tf''' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase__ = ViTMAEConfig() lowercase__ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase__ = np.random.uniform(size=(1, num_patches) ) # forward pass lowercase__ = model(**UpperCamelCase_ , noise=UpperCamelCase_ ) # verify the logits lowercase__ = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) lowercase__ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , UpperCamelCase_ , atol=1E-4 )
43
0
from torch import nn def lowercase_ ( __snake_case : Optional[int] ) -> Union[str, Any]: '''simple docstring''' if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
241
def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return "".join([hex(SCREAMING_SNAKE_CASE )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE )] ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if (len(SCREAMING_SNAKE_CASE ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(SCREAMING_SNAKE_CASE ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) __SCREAMING_SNAKE_CASE : int =pytest.mark.integration @pytest.mark.parametrize("""path""" ,["""paws""", """csv"""] ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): inspect_dataset(lowerCAmelCase__ ,lowerCAmelCase__ ) lowercase = path + """.py""" assert script_name in os.listdir(lowerCAmelCase__ ) assert "__pycache__" not in os.listdir(lowerCAmelCase__ ) @pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.parametrize("""path""" ,["""accuracy"""] ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): inspect_metric(lowerCAmelCase__ ,lowerCAmelCase__ ) lowercase = path + """.py""" assert script_name in os.listdir(lowerCAmelCase__ ) assert "__pycache__" not in os.listdir(lowerCAmelCase__ ) @pytest.mark.parametrize( """path, config_name, expected_splits""" ,[ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = get_dataset_config_info(lowerCAmelCase__ ,config_name=lowerCAmelCase__ ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" ,[ ("""paws""", None, ValueError), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): with pytest.raises(lowerCAmelCase__ ): get_dataset_config_info(lowerCAmelCase__ ,config_name=lowerCAmelCase__ ) @pytest.mark.parametrize( """path, expected""" ,[ ("""squad""", """plain_text"""), ("""acronym_identification""", """default"""), ("""lhoestq/squad""", """plain_text"""), ("""lhoestq/test""", """default"""), ("""lhoestq/demo1""", """lhoestq--demo1"""), ("""dalle-mini/wit""", """dalle-mini--wit"""), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = get_dataset_config_names(lowerCAmelCase__ ) assert expected in config_names @pytest.mark.parametrize( """path, expected_configs, expected_splits_in_first_config""" ,[ ("""squad""", ["""plain_text"""], ["""train""", """validation"""]), ("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]), ("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = get_dataset_infos(lowerCAmelCase__ ) assert list(infos.keys() ) == expected_configs lowercase = expected_configs[0] assert expected_config in infos lowercase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( """path, expected_config, expected_splits""" ,[ ("""squad""", """plain_text""", ["""train""", """validation"""]), ("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]), ("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = get_dataset_infos(lowerCAmelCase__ ) assert expected_config in infos lowercase = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( """path, config_name, expected_exception""" ,[ ("""paws""", None, ValueError), ] ,) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): with pytest.raises(lowerCAmelCase__ ): get_dataset_split_names(lowerCAmelCase__ ,config_name=lowerCAmelCase__ )
428
from __future__ import annotations def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ , lowercase__ = position lowercase__ = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] lowercase__ = [] for position in positions: lowercase__ , lowercase__ = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(SCREAMING_SNAKE_CASE ) return permissible_positions def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return not any(elem == 0 for row in board for elem in row ) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if is_complete(SCREAMING_SNAKE_CASE ): return True for position in get_valid_pos(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) ): lowercase__ , lowercase__ = position if board[y][x] == 0: lowercase__ = curr + 1 if open_knight_tour_helper(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , curr + 1 ): return True lowercase__ = 0 return False def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = [[0 for i in range(SCREAMING_SNAKE_CASE )] for j in range(SCREAMING_SNAKE_CASE )] for i in range(SCREAMING_SNAKE_CASE ): for j in range(SCREAMING_SNAKE_CASE ): lowercase__ = 1 if open_knight_tour_helper(SCREAMING_SNAKE_CASE , (i, j) , 1 ): return board lowercase__ = 0 lowercase__ = f'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class __a( unittest.TestCase ): """simple docstring""" def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=18 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=None ,) -> Any: UpperCAmelCase_ : List[str] = size if size is not None else {'''shortest_edge''': 18} UpperCAmelCase_ : str = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : str = batch_size UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : Any = num_frames UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : Dict = min_resolution UpperCAmelCase_ : List[str] = max_resolution UpperCAmelCase_ : Optional[int] = do_resize UpperCAmelCase_ : Optional[int] = size UpperCAmelCase_ : int = do_normalize UpperCAmelCase_ : Union[str, Any] = image_mean UpperCAmelCase_ : Tuple = image_std UpperCAmelCase_ : Any = crop_size def a__ ( self ) -> List[str]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class __a( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase = VivitImageProcessor if is_vision_available() else None def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Any = VivitImageProcessingTester(self ) @property def a__ ( self ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ ,'''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase_ ,'''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase_ ,'''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ ,'''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ ,'''do_center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase_ ,'''size''' ) ) def a__ ( self ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size ,{'''height''': 18, '''width''': 18} ) UpperCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} ) def a__ ( self ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos UpperCAmelCase_ : Optional[int] = prepare_video_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase_ ) for video in video_inputs: self.assertIsInstance(UpperCamelCase_ ,UpperCamelCase_ ) self.assertIsInstance(video[0] ,Image.Image ) # Test not batched input UpperCAmelCase_ : List[Any] = image_processing(video_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : Dict = image_processing(UpperCamelCase_ ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def a__ ( self ) -> Optional[int]: UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : Tuple = prepare_video_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase_ ,numpify=UpperCamelCase_ ) for video in video_inputs: self.assertIsInstance(UpperCamelCase_ ,UpperCamelCase_ ) self.assertIsInstance(video[0] ,np.ndarray ) # Test not batched input UpperCAmelCase_ : Optional[int] = image_processing(video_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : Tuple = image_processing(UpperCamelCase_ ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) def a__ ( self ) -> int: UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Tuple = prepare_video_inputs(self.image_processor_tester ,equal_resolution=UpperCamelCase_ ,torchify=UpperCamelCase_ ) for video in video_inputs: self.assertIsInstance(UpperCamelCase_ ,UpperCamelCase_ ) self.assertIsInstance(video[0] ,torch.Tensor ) # Test not batched input UpperCAmelCase_ : Dict = image_processing(video_inputs[0] ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,) # Test batched UpperCAmelCase_ : Any = image_processing(UpperCamelCase_ ,return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) ,)
30
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n' @dataclass class _a ( UpperCamelCase__ ): _lowercase : Union[PIL.Image.Image, np.ndarray] class _a ( UpperCamelCase__ ): def __init__( self: Dict , UpperCamelCase_: PriorTransformer , UpperCamelCase_: CLIPVisionModel , UpperCamelCase_: CLIPImageProcessor , UpperCamelCase_: HeunDiscreteScheduler , UpperCamelCase_: ShapERenderer , ) -> List[str]: """simple docstring""" super().__init__() self.register_modules( prior=UpperCamelCase_ , image_encoder=UpperCamelCase_ , image_processor=UpperCamelCase_ , scheduler=UpperCamelCase_ , renderer=UpperCamelCase_ , ) def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ) -> List[Any]: """simple docstring""" if latents is None: lowercase__ = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' ) lowercase__ = latents.to(UpperCamelCase_ ) lowercase__ = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self: str , UpperCamelCase_: Tuple=0 ) -> int: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) lowercase__ = torch.device(f'cuda:{gpu_id}' ) lowercase__ = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase_ , UpperCamelCase_ ) @property def lowerCamelCase_ ( self: List[Any] ) -> Dict: """simple docstring""" if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(UpperCamelCase_ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: int , UpperCamelCase_: Tuple , UpperCamelCase_: str , ) -> Any: """simple docstring""" if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , torch.Tensor ): lowercase__ = torch.cat(UpperCamelCase_ , axis=0 ) if image[0].ndim == 4 else torch.stack(UpperCamelCase_ , axis=0 ) if not isinstance(UpperCamelCase_ , torch.Tensor ): lowercase__ = self.image_processor(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 ) lowercase__ = image.to(dtype=self.image_encoder.dtype , device=UpperCamelCase_ ) lowercase__ = self.image_encoder(UpperCamelCase_ )['''last_hidden_state'''] lowercase__ = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 lowercase__ = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) if do_classifier_free_guidance: lowercase__ = torch.zeros_like(UpperCamelCase_ ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase__ = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(UpperCamelCase_ ) def __call__( self: Tuple , UpperCamelCase_: Union[PIL.Image.Image, List[PIL.Image.Image]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 25 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 64 , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ) -> Union[str, Any]: """simple docstring""" if isinstance(UpperCamelCase_ , PIL.Image.Image ): lowercase__ = 1 elif isinstance(UpperCamelCase_ , torch.Tensor ): lowercase__ = image.shape[0] elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): lowercase__ = len(UpperCamelCase_ ) else: raise ValueError( f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(UpperCamelCase_ )}' ) lowercase__ = self._execution_device lowercase__ = batch_size * num_images_per_prompt lowercase__ = guidance_scale > 1.0 lowercase__ = self._encode_image(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # prior self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ ) lowercase__ = self.scheduler.timesteps lowercase__ = self.prior.config.num_embeddings lowercase__ = self.prior.config.embedding_dim lowercase__ = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim lowercase__ = latents.reshape(latents.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the latents if we are doing classifier free guidance lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowercase__ = self.scheduler.scale_model_input(UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self.prior( UpperCamelCase_ , timestep=UpperCamelCase_ , proj_embedding=UpperCamelCase_ , ).predicted_image_embedding # remove the variance lowercase__ , lowercase__ = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: lowercase__ , lowercase__ = noise_pred.chunk(2 ) lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) lowercase__ = self.scheduler.step( UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=UpperCamelCase_ ) lowercase__ = [] for i, latent in enumerate(UpperCamelCase_ ): print() lowercase__ = self.renderer.decode( latent[None, :] , UpperCamelCase_ , size=UpperCamelCase_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(UpperCamelCase_ ) lowercase__ = torch.stack(UpperCamelCase_ ) if output_type not in ["np", "pil"]: raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' ) lowercase__ = images.cpu().numpy() if output_type == "pil": lowercase__ = [self.numpy_to_pil(UpperCamelCase_ ) for image in images] # Offload last model to CPU if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=UpperCamelCase_ )
43
0
import comet # From: unbabel-comet import torch import datasets lowerCamelCase__ : Union[str, Any] = datasets.logging.get_logger(__name__) lowerCamelCase__ : Optional[Any] = """\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n""" lowerCamelCase__ : Tuple = """\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n""" lowerCamelCase__ : int = """\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class __magic_name__ (datasets.Metric ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self:int ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''sources''': datasets.Value('''string''' , id='''sequence''' ), '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[ '''https://github.com/Unbabel/COMET''', '''https://www.aclweb.org/anthology/2020.emnlp-main.213/''', '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''', ] , ) def SCREAMING_SNAKE_CASE__ ( self:List[Any] , _a:int ): if self.config_name == "default": snake_case__ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) ) else: snake_case__ = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def SCREAMING_SNAKE_CASE__ ( self:int , _a:str , _a:Dict , _a:List[str] , _a:Tuple=None , _a:Union[str, Any]=False ): if gpus is None: snake_case__ = 1 if torch.cuda.is_available() else 0 snake_case__ = {'''src''': sources, '''mt''': predictions, '''ref''': references} snake_case__ = [dict(zip(UpperCamelCase_ , UpperCamelCase_ ) ) for t in zip(*data.values() )] snake_case__ , snake_case__ = self.scorer.predict(UpperCamelCase_ , gpus=UpperCamelCase_ , progress_bar=UpperCamelCase_ ) return {"mean_score": mean_score, "scores": scores}
33
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowerCAmelCase = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' lowerCAmelCase = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' lowerCAmelCase = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _a ( datasets.Metric ): def lowerCamelCase_ ( self: Tuple ) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def lowerCamelCase_ ( self: str , UpperCamelCase_: List[List[List[str]]] , UpperCamelCase_: List[List[str]] , UpperCamelCase_: int = 1 , UpperCamelCase_: int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=UpperCamelCase_ , hypotheses=UpperCamelCase_ , min_len=UpperCamelCase_ , max_len=UpperCamelCase_ ) }
43
0
'''simple docstring''' from math import asin, atan, cos, radians, sin, sqrt, tan a : Any = 637_8137.0 a : Union[str, Any] = 635_6752.31_4245 a : str = 637_8137 def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) -> List[str]: '''simple docstring''' snake_case_ = (AXIS_A - AXIS_B) / AXIS_A snake_case_ = atan((1 - flattening) * tan(radians(__UpperCAmelCase ) ) ) snake_case_ = atan((1 - flattening) * tan(radians(__UpperCAmelCase ) ) ) snake_case_ = radians(__UpperCAmelCase ) snake_case_ = radians(__UpperCAmelCase ) # Equation snake_case_ = sin((phi_a - phi_a) / 2 ) snake_case_ = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda snake_case_ = sqrt(sin_sq_phi + (cos(__UpperCAmelCase ) * cos(__UpperCAmelCase ) * sin_sq_lambda) ) return 2 * RADIUS * asin(__UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
640
import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[Any] = DownBlockaD # noqa F405 _lowercase : Dict = '''down''' def lowerCamelCase_ ( self: List[str] ) -> Tuple: """simple docstring""" lowercase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = ResnetDownsampleBlockaD # noqa F405 _lowercase : Tuple = '''down''' def lowerCamelCase_ ( self: List[Any] ) -> str: """simple docstring""" lowercase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = AttnDownBlockaD # noqa F405 _lowercase : List[Any] = '''down''' def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = CrossAttnDownBlockaD # noqa F405 _lowercase : Optional[int] = '''down''' def lowerCamelCase_ ( self: Optional[Any] ) -> Any: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: str ) -> Tuple: """simple docstring""" lowercase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = SimpleCrossAttnDownBlockaD # noqa F405 _lowercase : str = '''down''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]: """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = SkipDownBlockaD # noqa F405 _lowercase : Tuple = '''down''' @property def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" return super().get_dummy_input(include_skip_sample=UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> List[Any]: """simple docstring""" lowercase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = AttnSkipDownBlockaD # noqa F405 _lowercase : Optional[int] = '''down''' @property def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" return super().get_dummy_input(include_skip_sample=UpperCamelCase_ ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : int = DownEncoderBlockaD # noqa F405 _lowercase : List[Any] = '''down''' @property def lowerCamelCase_ ( self: List[str] ) -> str: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: Any ) -> List[Any]: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''out_channels''': 32, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: str ) -> Dict: """simple docstring""" lowercase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnDownEncoderBlockaD # noqa F405 _lowercase : int = '''down''' @property def lowerCamelCase_ ( self: Dict ) -> Optional[Any]: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: str ) -> List[str]: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''out_channels''': 32, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = UNetMidBlockaD # noqa F405 _lowercase : Union[str, Any] = '''mid''' def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = { '''in_channels''': 32, '''temb_channels''': 128, } lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" lowercase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = UNetMidBlockaDCrossAttn # noqa F405 _lowercase : str = '''mid''' def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405 _lowercase : str = '''mid''' @property def lowerCamelCase_ ( self: int ) -> List[Any]: """simple docstring""" return super().get_dummy_input(include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = UpBlockaD # noqa F405 _lowercase : Any = '''up''' @property def lowerCamelCase_ ( self: str ) -> str: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> List[Any]: """simple docstring""" lowercase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Tuple = ResnetUpsampleBlockaD # noqa F405 _lowercase : List[Any] = '''up''' @property def lowerCamelCase_ ( self: List[Any] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[int]: """simple docstring""" lowercase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Any = CrossAttnUpBlockaD # noqa F405 _lowercase : List[str] = '''up''' @property def lowerCamelCase_ ( self: int ) -> Any: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Dict ) -> Optional[int]: """simple docstring""" lowercase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = SimpleCrossAttnUpBlockaD # noqa F405 _lowercase : Dict = '''up''' @property def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ , include_encoder_hidden_states=UpperCamelCase_ ) def lowerCamelCase_ ( self: str ) -> int: """simple docstring""" lowercase__ , lowercase__ = super().prepare_init_args_and_inputs_for_common() lowercase__ = 32 return init_dict, inputs_dict def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnUpBlockaD # noqa F405 _lowercase : Optional[Any] = '''up''' @property def lowerCamelCase_ ( self: Tuple ) -> int: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" lowercase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Dict = SkipUpBlockaD # noqa F405 _lowercase : Optional[int] = '''up''' @property def lowerCamelCase_ ( self: Dict ) -> int: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[Any] ) -> Dict: """simple docstring""" lowercase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : List[str] = AttnSkipUpBlockaD # noqa F405 _lowercase : str = '''up''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> Dict: """simple docstring""" return super().get_dummy_input(include_res_hidden_states_tuple=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Dict = UpDecoderBlockaD # noqa F405 _lowercase : Tuple = '''up''' @property def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: List[str] ) -> Optional[Any]: """simple docstring""" lowercase__ = {'''in_channels''': 32, '''out_channels''': 32} lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137] super().test_output(UpperCamelCase_ ) class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Optional[int] = AttnUpDecoderBlockaD # noqa F405 _lowercase : str = '''up''' @property def lowerCamelCase_ ( self: Optional[Any] ) -> Union[str, Any]: """simple docstring""" return super().get_dummy_input(include_temb=UpperCamelCase_ ) def lowerCamelCase_ ( self: Dict ) -> List[str]: """simple docstring""" lowercase__ = {'''in_channels''': 32, '''out_channels''': 32} lowercase__ = self.dummy_input return init_dict, inputs_dict def lowerCamelCase_ ( self: int ) -> Optional[Any]: """simple docstring""" lowercase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] super().test_output(UpperCamelCase_ )
43
0
from collections.abc import Sequence def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ): return sum(c * (x**i) for i, c in enumerate(lowerCamelCase ) ) def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ): __magic_name__ : Union[str, Any] =0.0 for coeff in reversed(lowerCamelCase ): __magic_name__ : Dict =result * x + coeff return result if __name__ == "__main__": UpperCAmelCase_ : int = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase_ : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
21
def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowercase__ = set() # Replace all the whitespace in our sentence lowercase__ = input_str.replace(''' ''' , '''''' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(SCREAMING_SNAKE_CASE ) == 26 def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" lowercase__ = [False] * 26 for char in input_str: if char.islower(): lowercase__ = True elif char.isupper(): lowercase__ = True return all(SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE = "The quick brown fox jumps over the lazy dog" , ): """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def _a ( ): """simple docstring""" from timeit import timeit lowercase__ = '''from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest''' print(timeit('''is_pangram()''' , setup=SCREAMING_SNAKE_CASE ) ) print(timeit('''is_pangram_faster()''' , setup=SCREAMING_SNAKE_CASE ) ) print(timeit('''is_pangram_fastest()''' , setup=SCREAMING_SNAKE_CASE ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
43
0
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' A: Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase__ ) A: List[str] = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=lowerCamelCase__ ) env_command_parser(subparsers=lowerCamelCase__ ) launch_command_parser(subparsers=lowerCamelCase__ ) tpu_command_parser(subparsers=lowerCamelCase__ ) test_command_parser(subparsers=lowerCamelCase__ ) # Let's go A: Union[str, Any] = parser.parse_args() if not hasattr(lowerCamelCase__ , """func""" ): parser.print_help() exit(1 ) # Run args.func(lowerCamelCase__ ) if __name__ == "__main__": main()
135
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length, 2) , SCREAMING_SNAKE_CASE ) else: lowercase__ = np.full((len(SCREAMING_SNAKE_CASE ), sequence_length) , SCREAMING_SNAKE_CASE ) for i, tensor in enumerate(SCREAMING_SNAKE_CASE ): if padding_side == "right": if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = tensor[:sequence_length] else: lowercase__ = tensor[:sequence_length] else: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): lowercase__ = tensor[:sequence_length] else: lowercase__ = tensor[:sequence_length] return out_tensor.tolist() def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = ord(SCREAMING_SNAKE_CASE ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26): return True lowercase__ = unicodedata.category(SCREAMING_SNAKE_CASE ) if cat.startswith('''P''' ): return True return False @dataclass class _a ( UpperCamelCase__ ): _lowercase : PreTrainedTokenizerBase _lowercase : Union[bool, str, PaddingStrategy] = True _lowercase : Optional[int] = None _lowercase : Optional[int] = None _lowercase : int = -100 _lowercase : str = "pt" def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Optional[Any] ) -> List[Any]: """simple docstring""" import torch lowercase__ = '''label''' if '''label''' in features[0].keys() else '''labels''' lowercase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None lowercase__ = self.tokenizer.pad( UpperCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch lowercase__ = torch.tensor(batch['''entity_ids'''] ).shape[1] lowercase__ = self.tokenizer.padding_side if padding_side == "right": lowercase__ = [ list(UpperCamelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) for label in labels ] else: lowercase__ = [ [self.label_pad_token_id] * (sequence_length - len(UpperCamelCase_ )) + list(UpperCamelCase_ ) for label in labels ] lowercase__ = [feature['''ner_tags'''] for feature in features] lowercase__ = padding_tensor(UpperCamelCase_ , -1 , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = [feature['''original_entity_spans'''] for feature in features] lowercase__ = padding_tensor(UpperCamelCase_ , (-1, -1) , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = {k: torch.tensor(UpperCamelCase_ , dtype=torch.intaa ) for k, v in batch.items()} return batch
43
0
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _SCREAMING_SNAKE_CASE ( UpperCamelCase__ , unittest.TestCase ): '''simple docstring''' lowercase_ = FunnelTokenizer lowercase_ = FunnelTokenizerFast lowercase_ = True lowercase_ = True def SCREAMING_SNAKE_CASE_ (self : Any) ->str: '''simple docstring''' super().setUp() lowerCamelCase__: Any =[ "<unk>", "<cls>", "<sep>", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] lowerCamelCase__: str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , **UpperCAmelCase_ : List[str]) ->Tuple: '''simple docstring''' return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_) def SCREAMING_SNAKE_CASE_ (self : List[str] , **UpperCAmelCase_ : List[str]) ->str: '''simple docstring''' return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Tuple) ->List[Any]: '''simple docstring''' lowerCamelCase__: int ="UNwant\u00E9d,running" lowerCamelCase__: Optional[Any] ="unwanted, running" return input_text, output_text def SCREAMING_SNAKE_CASE_ (self : Dict) ->Optional[int]: '''simple docstring''' lowerCamelCase__: Tuple =self.tokenizer_class(self.vocab_file) lowerCamelCase__: Optional[Any] =tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(UpperCamelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [7, 4, 5, 10, 8, 9]) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Tuple: '''simple docstring''' lowerCamelCase__: List[Any] =self.get_tokenizers(do_lower_case=UpperCamelCase_) for tokenizer in tokenizers: lowerCamelCase__: List[Any] =tokenizer("UNwant\u00E9d,running") lowerCamelCase__: Optional[int] =len(inputs["input_ids"]) - 1 self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len) lowerCamelCase__: List[str] =tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running") self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len)
59
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _a ( UpperCamelCase__ ): def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]: """simple docstring""" super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) lowercase__ = eval_examples lowercase__ = post_process_function def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]: """simple docstring""" lowercase__ = gen_kwargs.copy() lowercase__ = ( gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length ) lowercase__ = ( gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams ) lowercase__ = gen_kwargs lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset lowercase__ = self.get_eval_dataloader(UpperCamelCase_ ) lowercase__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowercase__ = self.compute_metrics lowercase__ = None lowercase__ = time.time() lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowercase__ = eval_loop( UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , ) finally: lowercase__ = compute_metrics lowercase__ = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowercase__ = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): lowercase__ = metrics.pop(UpperCamelCase_ ) metrics.update(output.metrics ) else: lowercase__ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(UpperCamelCase_ ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ ) return metrics def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" lowercase__ = gen_kwargs.copy() lowercase__ = self.get_test_dataloader(UpperCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. lowercase__ = self.compute_metrics lowercase__ = None lowercase__ = time.time() lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: lowercase__ = eval_loop( UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , ) finally: lowercase__ = compute_metrics lowercase__ = self.args.eval_batch_size * self.args.world_size if f'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' ) lowercase__ = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f'{metric_key_prefix}_' ): lowercase__ = metrics.pop(UpperCamelCase_ ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
43
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin A__ : Any = False @skip_mps class _UpperCAmelCase ( UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,unittest.TestCase ): """simple docstring""" lowercase__ = StableDiffusionAttendAndExcitePipeline lowercase__ = False lowercase__ = TEXT_TO_IMAGE_PARAMS lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} ) lowercase__ = TEXT_TO_IMAGE_IMAGE_PARAMS lowercase__ = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def lowercase__ ( cls : Optional[int] ): '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(UpperCamelCase_ ) @classmethod def lowercase__ ( cls : int ): '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(UpperCamelCase_ ) def lowercase__ ( self : str ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=UpperCamelCase_, ) lowercase__ = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=UpperCamelCase_, set_alpha_to_one=UpperCamelCase_, ) torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='''gelu''', projection_dim=512, ) lowercase__ = CLIPTextModel(UpperCamelCase_ ) lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowercase__ ( self : str, lowerCamelCase : str, lowerCamelCase : Optional[int]=0 ): '''simple docstring''' if str(UpperCamelCase_ ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase_ ) else: lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) lowercase__ = lowercase__ = { '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def lowercase__ ( self : Tuple ): '''simple docstring''' lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowercase__ = self.get_dummy_inputs(UpperCamelCase_ ) lowercase__ = pipe(**UpperCamelCase_ ).images lowercase__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 64, 64, 3) ) lowercase__ = np.array( [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] ) lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase_, 1E-3 ) def lowercase__ ( self : Dict ): '''simple docstring''' super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def lowercase__ ( self : Any ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase__ ( self : Optional[int] ): '''simple docstring''' self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7E-4 ) def lowercase__ ( self : Any ): '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def lowercase__ ( self : Optional[Any] ): '''simple docstring''' super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def lowercase__ ( self : Optional[Any] ): '''simple docstring''' super().test_save_load_local(expected_max_difference=5E-4 ) def lowercase__ ( self : Optional[int] ): '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class _UpperCAmelCase ( unittest.TestCase ): """simple docstring""" @classmethod def lowercase__ ( cls : Optional[int] ): '''simple docstring''' super().setUpClass() torch.use_deterministic_algorithms(UpperCamelCase_ ) @classmethod def lowercase__ ( cls : Dict ): '''simple docstring''' super().tearDownClass() torch.use_deterministic_algorithms(UpperCamelCase_ ) def lowercase__ ( self : Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : List[str] ): '''simple docstring''' lowercase__ = torch.manual_seed(51 ) lowercase__ = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''', safety_checker=UpperCamelCase_, torch_dtype=torch.floataa ) pipe.to('''cuda''' ) lowercase__ = '''a painting of an elephant with glasses''' lowercase__ = [5, 7] lowercase__ = pipe( prompt=UpperCamelCase_, token_indices=UpperCamelCase_, guidance_scale=7.5, generator=UpperCamelCase_, num_inference_steps=5, max_iter_to_alter=5, output_type='''numpy''', ).images[0] lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-1
183
import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = os.path.join(args.tf_model_dir , '''parameters.json''' ) lowercase__ = json.loads(open(SCREAMING_SNAKE_CASE ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('''.pt''' ): lowercase__ = args.output + '''.pt''' lowercase__ = OrderedDict() with tf.device('''/CPU:0''' ): lowercase__ = tf.train.load_checkpoint(args.tf_model_dir ) lowercase__ = reader.get_variable_to_shape_map() for key_name in shapes.keys(): lowercase__ = reader.get_tensor(SCREAMING_SNAKE_CASE ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): lowercase__ = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): lowercase__ = 8 lowercase__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/moe''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/softmlp/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): lowercase__ = key_name[-9:-7] for i in range(16 ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) lowercase__ = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/mlp''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p1/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/kernel''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/p2/bias''' ): lowercase__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/ln''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.feed_forward.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/att''' ): lowercase__ = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/qkv/kernel''' ): lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum lowercase__ = state[:, 0, :, :] lowercase__ = state[:, 1, :, :] lowercase__ = state[:, 2, :, :] lowercase__ = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) lowercase__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/o/kernel''' ): lowercase__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player lowercase__ = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/an''' ): lowercase__ = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.bias''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.endswith('''/g''' ): lowercase__ = '''model.blocks.%d.self_attn.norm.weight''' % player lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): lowercase__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] lowercase__ = '''model.%s.weight''' % nlayer lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) if key_name.startswith('''model/wte''' ): lowercase__ = '''lm_head.weight''' lowercase__ = vnp.copy() # same in embedded lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name.startswith('''model/wob''' ): lowercase__ = '''final_logits_bias''' lowercase__ = vnp.copy() # same in embedded lowercase__ = state.reshape((1, -1) ) lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense/kernel": lowercase__ = '''model.last_project.weight''' lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) elif key_name == "model/dense_1/bias": lowercase__ = '''model.last_project.bias''' lowercase__ = vnp.copy() # same because it is one dimensional lowercase__ = torch.tensor(SCREAMING_SNAKE_CASE ) torch.save(SCREAMING_SNAKE_CASE , args.output ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser( description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model') parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model') lowerCAmelCase = parser.parse_args() convert_tf_gptsan_to_pt(args)
43
0
"""simple docstring""" from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand __magic_name__ = logging.get_logger(__name__) # pylint: disable=invalid-name def _lowerCamelCase ( UpperCAmelCase__ ) -> Optional[int]: '''simple docstring''' if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(UpperCAmelCase__ ): return ext raise Exception( f'''Unable to determine file format from file extension {path}. ''' f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' ) def _lowerCamelCase ( UpperCAmelCase__ ) -> int: '''simple docstring''' a__ = pipeline( task=args.task,model=args.model if args.model else None,config=args.config,tokenizer=args.tokenizer,device=args.device,) a__ = try_infer_format_from_ext(args.input ) if args.format == 'infer' else args.format a__ = PipelineDataFormat.from_str( format=UpperCAmelCase__,output_path=args.output,input_path=args.input,column=args.column if args.column else nlp.default_input_names,overwrite=args.overwrite,) return RunCommand(UpperCAmelCase__,UpperCAmelCase__ ) class SCREAMING_SNAKE_CASE ( UpperCamelCase__ ): """simple docstring""" def __init__( self : Dict , _snake_case : Pipeline , _snake_case : PipelineDataFormat ) -> Tuple: '''simple docstring''' a__ = nlp a__ = reader @staticmethod def _lowerCAmelCase ( _snake_case : ArgumentParser ) -> Dict: '''simple docstring''' a__ = parser.add_parser('run' , help='Run a pipeline through the CLI' ) run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run' ) run_parser.add_argument('--input' , type=UpperCamelCase_ , help='Path to the file to use for inference' ) run_parser.add_argument('--output' , type=UpperCamelCase_ , help='Path to the file that will be used post to write results.' ) run_parser.add_argument('--model' , type=UpperCamelCase_ , help='Name or path to the model to instantiate.' ) run_parser.add_argument('--config' , type=UpperCamelCase_ , help='Name or path to the model\'s config to instantiate.' ) run_parser.add_argument( '--tokenizer' , type=UpperCamelCase_ , help='Name of the tokenizer to use. (default: same as the model name)' ) run_parser.add_argument( '--column' , type=UpperCamelCase_ , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , ) run_parser.add_argument( '--format' , type=UpperCamelCase_ , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , ) run_parser.add_argument( '--device' , type=UpperCamelCase_ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , ) run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.' ) run_parser.set_defaults(func=UpperCamelCase_ ) def _lowerCAmelCase ( self : int ) -> Optional[int]: '''simple docstring''' a__ , a__ = self._nlp, [] for entry in self._reader: a__ = nlp(**UpperCamelCase_ ) if self._reader.is_multi_columns else nlp(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): outputs.append(UpperCamelCase_ ) else: outputs += output # Saving data if self._nlp.binary_output: a__ = self._reader.save_binary(UpperCamelCase_ ) logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' ) else: self._reader.save(UpperCamelCase_ )
232
from __future__ import annotations def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
43
0
import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class __UpperCAmelCase ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ) UpperCamelCase = sd_pipe.to(UpperCamelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) sd_pipe.set_scheduler("sample_euler" ) UpperCamelCase = "A painting of a squirrel eating a burger" UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = sd_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" ) UpperCamelCase = output.images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) UpperCamelCase = sd_pipe.to(UpperCamelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) sd_pipe.set_scheduler("sample_euler" ) UpperCamelCase = "A painting of a squirrel eating a burger" UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = sd_pipe([prompt] , generator=UpperCamelCase_ , guidance_scale=9.0 , num_inference_steps=20 , output_type="np" ) UpperCamelCase = output.images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) UpperCamelCase = sd_pipe.to(UpperCamelCase_ ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) sd_pipe.set_scheduler("sample_dpmpp_2m" ) UpperCamelCase = "A painting of a squirrel eating a burger" UpperCamelCase = torch.manual_seed(0 ) UpperCamelCase = sd_pipe( [prompt] , generator=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=15 , output_type="np" , use_karras_sigmas=UpperCamelCase_ , ) UpperCamelCase = output.images UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCamelCase = np.array( [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
606
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase = { 'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'], 'tokenization_convbert': ['ConvBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['ConvBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvBertForMaskedLM', 'ConvBertForMultipleChoice', 'ConvBertForQuestionAnswering', 'ConvBertForSequenceClassification', 'ConvBertForTokenClassification', 'ConvBertLayer', 'ConvBertModel', 'ConvBertPreTrainedModel', 'load_tf_weights_in_convbert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFConvBertForMaskedLM', 'TFConvBertForMultipleChoice', 'TFConvBertForQuestionAnswering', 'TFConvBertForSequenceClassification', 'TFConvBertForTokenClassification', 'TFConvBertLayer', 'TFConvBertModel', 'TFConvBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
43
0
from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class _snake_case : _A = 42 _A = None _A = None def lowercase_ ( ) -> Tuple: '''simple docstring''' snake_case__ :List[Any] = Node(1 ) snake_case__ :Any = Node(2 ) snake_case__ :Any = Node(3 ) snake_case__ :Optional[int] = Node(4 ) snake_case__ :List[Any] = Node(5 ) return tree def lowercase_ ( __snake_case : List[str] ) -> Any: '''simple docstring''' return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def lowercase_ ( __snake_case : List[str] ) -> int: '''simple docstring''' return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def lowercase_ ( __snake_case : Optional[int] ) -> Dict: '''simple docstring''' return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def lowercase_ ( __snake_case : Tuple ) -> int: '''simple docstring''' return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def lowercase_ ( __snake_case : Optional[int] ) -> Optional[Any]: '''simple docstring''' snake_case__ :Tuple = [] if root is None: return output snake_case__ :Optional[Any] = deque([root] ) while process_queue: snake_case__ :List[str] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def lowercase_ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ) -> List[str]: '''simple docstring''' snake_case__ :Any = [] def populate_output(__snake_case : Union[str, Any] , __snake_case : str ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(__snake_case , __snake_case ) return output def lowercase_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Tuple: '''simple docstring''' snake_case__ :List[str] = [] def populate_output(__snake_case : Any , __snake_case : int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(__snake_case , __snake_case ) return output def lowercase_ ( __snake_case : List[Any] ) -> Any: '''simple docstring''' if root is None: return [] snake_case__ :Dict = [] snake_case__ :int = 0 snake_case__ :Optional[int] = height(__snake_case ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(__snake_case , __snake_case ) ) snake_case__ :Any = 1 else: output.append(get_nodes_from_right_to_left(__snake_case , __snake_case ) ) snake_case__ :List[Any] = 0 return output def lowercase_ ( ) -> Any: # Main function for testing. '''simple docstring''' snake_case__ :Any = make_tree() print(F'In-order Traversal: {inorder(__snake_case )}' ) print(F'Pre-order Traversal: {preorder(__snake_case )}' ) print(F'Post-order Traversal: {postorder(__snake_case )}' , "\n" ) print(F'Height of Tree: {height(__snake_case )}' , "\n" ) print("Complete Level Order Traversal: " ) print(level_order(__snake_case ) , "\n" ) print("Level-wise order Traversal: " ) for level in range(1 , height(__snake_case ) + 1 ): print(F'Level {level}:' , get_nodes_from_left_to_right(__snake_case , level=__snake_case ) ) print("\nZigZag order Traversal: " ) print(zigzag(__snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
241
import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() def lowerCamelCase_ ( self: Dict ) -> Tuple: """simple docstring""" lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) lowercase__ = '''xvjiarui/stable-diffusion-2-inpainting''' lowercase__ , lowercase__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(UpperCamelCase_ , safety_checker=UpperCamelCase_ ) lowercase__ = '''Face of a yellow cat, high resolution, sitting on a park bench''' lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = 50 lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = num_samples * [init_image] lowercase__ = num_samples * [mask_image] lowercase__ , lowercase__ , lowercase__ = pipeline.prepare_inputs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # shard inputs and rng lowercase__ = replicate(UpperCamelCase_ ) lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = pipeline( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , jit=UpperCamelCase_ ) lowercase__ = output.images.reshape(UpperCamelCase_ , 512 , 512 , 3 ) lowercase__ = images[0, 253:256, 253:256, -1] lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ = jnp.array( [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
43
0
from __future__ import annotations from collections.abc import Sequence from typing import Literal def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = list(lowerCAmelCase__ ) lowercase = list(lowerCAmelCase__ ) lowercase = 0 for i in range(len(lowerCAmelCase__ ) ): if lista[i] != lista[i]: count += 1 lowercase = """_""" if count > 1: return False else: return "".join(lowerCAmelCase__ ) def UpperCamelCase__ ( lowerCAmelCase__ ): lowercase = [] while True: lowercase = ["""$"""] * len(lowerCAmelCase__ ) lowercase = [] for i in range(len(lowerCAmelCase__ ) ): for j in range(i + 1 ,len(lowerCAmelCase__ ) ): lowercase = compare_string(binary[i] ,binary[j] ) if k is False: lowercase = """*""" lowercase = """*""" temp.append("""X""" ) for i in range(len(lowerCAmelCase__ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(lowerCAmelCase__ ) == 0: return pi lowercase = list(set(lowerCAmelCase__ ) ) def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = [] for minterm in minterms: lowercase = """""" for _ in range(lowerCAmelCase__ ): lowercase = str(minterm % 2 ) + string minterm //= 2 temp.append(lowerCAmelCase__ ) return temp def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = list(lowerCAmelCase__ ) lowercase = list(lowerCAmelCase__ ) lowercase = 0 for i in range(len(lowerCAmelCase__ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = [] lowercase = [0] * len(lowerCAmelCase__ ) for i in range(len(chart[0] ) ): lowercase = 0 lowercase = -1 for j in range(len(lowerCAmelCase__ ) ): if chart[j][i] == 1: count += 1 lowercase = j if count == 1: lowercase = 1 for i in range(len(lowerCAmelCase__ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(lowerCAmelCase__ ) ): lowercase = 0 temp.append(prime_implicants[i] ) while True: lowercase = 0 lowercase = -1 lowercase = 0 for i in range(len(lowerCAmelCase__ ) ): lowercase = chart[i].count(1 ) if count_n > max_n: lowercase = count_n lowercase = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(lowerCAmelCase__ ) ): lowercase = 0 def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowercase = [[0 for x in range(len(lowerCAmelCase__ ) )] for x in range(len(lowerCAmelCase__ ) )] for i in range(len(lowerCAmelCase__ ) ): lowercase = prime_implicants[i].count("""_""" ) for j in range(len(lowerCAmelCase__ ) ): if is_for_table(prime_implicants[i] ,binary[j] ,lowerCAmelCase__ ): lowercase = 1 return chart def UpperCamelCase__ ( ): lowercase = int(input("""Enter the no. of variables\n""" ) ) lowercase = [ float(lowerCAmelCase__ ) for x in input( """Enter the decimal representation of Minterms \'Spaces Separated\'\n""" ).split() ] lowercase = decimal_to_binary(lowerCAmelCase__ ,lowerCAmelCase__ ) lowercase = check(lowerCAmelCase__ ) print("""Prime Implicants are:""" ) print(lowerCAmelCase__ ) lowercase = prime_implicant_chart(lowerCAmelCase__ ,lowerCAmelCase__ ) lowercase = selection(lowerCAmelCase__ ,lowerCAmelCase__ ) print("""Essential Prime Implicants are:""" ) print(lowerCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
428
from __future__ import annotations import math def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(SCREAMING_SNAKE_CASE ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) return min( minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) def _a ( ): """simple docstring""" lowercase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] lowercase__ = math.log(len(SCREAMING_SNAKE_CASE ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
43
0
from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class __a( UpperCamelCase__ ): """simple docstring""" lowerCAmelCase = '''open-llama''' def __init__( self ,_SCREAMING_SNAKE_CASE=100_000 ,_SCREAMING_SNAKE_CASE=4_096 ,_SCREAMING_SNAKE_CASE=11_008 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE="silu" ,_SCREAMING_SNAKE_CASE=2_048 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-6 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]: UpperCAmelCase_ : List[Any] = vocab_size UpperCAmelCase_ : str = max_position_embeddings UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : Tuple = intermediate_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : Union[str, Any] = num_attention_heads UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : List[Any] = rms_norm_eps UpperCAmelCase_ : Union[str, Any] = use_cache UpperCAmelCase_ : List[str] = kwargs.pop( '''use_memorry_efficient_attention''' ,UpperCamelCase_ ) UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_dropout_prob UpperCAmelCase_ : Optional[int] = use_stable_embedding UpperCAmelCase_ : str = shared_input_output_embedding UpperCAmelCase_ : Union[str, Any] = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=UpperCamelCase_ ,bos_token_id=UpperCamelCase_ ,eos_token_id=UpperCamelCase_ ,tie_word_embeddings=UpperCamelCase_ ,**UpperCamelCase_ ,) def a__ ( self ) -> Tuple: if self.rope_scaling is None: return if not isinstance(self.rope_scaling ,UpperCamelCase_ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ''' f'''got {self.rope_scaling}''' ) UpperCAmelCase_ : Any = self.rope_scaling.get('''type''' ,UpperCamelCase_ ) UpperCAmelCase_ : Optional[Any] = self.rope_scaling.get('''factor''' ,UpperCamelCase_ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(UpperCamelCase_ ,UpperCamelCase_ ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
30
class _a : def __init__( self: Tuple , UpperCamelCase_: Dict ) -> List[str]: """simple docstring""" lowercase__ = val lowercase__ = None lowercase__ = None def lowerCamelCase_ ( self: Any , UpperCamelCase_: Any ) -> Union[str, Any]: """simple docstring""" if self.val: if val < self.val: if self.left is None: lowercase__ = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: lowercase__ = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: lowercase__ = val def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" if root: inorder(root.left , SCREAMING_SNAKE_CASE ) res.append(root.val ) inorder(root.right , SCREAMING_SNAKE_CASE ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if len(SCREAMING_SNAKE_CASE ) == 0: return arr lowercase__ = Node(arr[0] ) for i in range(1 , len(SCREAMING_SNAKE_CASE ) ): root.insert(arr[i] ) # Traverse BST in order. lowercase__ = [] inorder(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
43
0
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase__ : Optional[int] = { """tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""", """tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""", } class __magic_name__ (UpperCamelCase__ ): '''simple docstring''' __lowercase : Union[str, Any] = '''falcon''' __lowercase : str = ['''past_key_values'''] def __init__( self:str , _a:Any=6_50_24 , _a:Tuple=45_44 , _a:Union[str, Any]=32 , _a:List[Any]=71 , _a:List[str]=1e-5 , _a:Tuple=0.02 , _a:Tuple=True , _a:Dict=0.0 , _a:Optional[Any]=0.0 , _a:str=None , _a:str=False , _a:str=False , _a:Optional[Any]=True , _a:Union[str, Any]=True , _a:Union[str, Any]=False , _a:Union[str, Any]=11 , _a:Optional[Any]=11 , **_a:int , ): snake_case__ = vocab_size # Backward compatibility with n_embed kwarg snake_case__ = kwargs.pop('''n_embed''' , UpperCamelCase_ ) snake_case__ = hidden_size if n_embed is None else n_embed snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = layer_norm_epsilon snake_case__ = initializer_range snake_case__ = use_cache snake_case__ = hidden_dropout snake_case__ = attention_dropout snake_case__ = bos_token_id snake_case__ = eos_token_id snake_case__ = num_attention_heads if num_kv_heads is None else num_kv_heads snake_case__ = alibi snake_case__ = new_decoder_architecture snake_case__ = multi_query # Ignored when new_decoder_architecture is True snake_case__ = parallel_attn snake_case__ = bias super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) @property def SCREAMING_SNAKE_CASE__ ( self:str ): return self.hidden_size // self.num_attention_heads @property def SCREAMING_SNAKE_CASE__ ( self:List[Any] ): return not self.alibi
33
lowerCAmelCase = { 'a': 'AAAAA', 'b': 'AAAAB', 'c': 'AAABA', 'd': 'AAABB', 'e': 'AABAA', 'f': 'AABAB', 'g': 'AABBA', 'h': 'AABBB', 'i': 'ABAAA', 'j': 'BBBAA', 'k': 'ABAAB', 'l': 'ABABA', 'm': 'ABABB', 'n': 'ABBAA', 'o': 'ABBAB', 'p': 'ABBBA', 'q': 'ABBBB', 'r': 'BAAAA', 's': 'BAAAB', 't': 'BAABA', 'u': 'BAABB', 'v': 'BBBAB', 'w': 'BABAA', 'x': 'BABAB', 'y': 'BABBA', 'z': 'BABBB', ' ': ' ', } lowerCAmelCase = {value: key for key, value in encode_dict.items()} def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = '''''' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('''encode() accepts only letters of the alphabet and spaces''' ) return encoded def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if set(SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' ) lowercase__ = '''''' for word in coded.split(): while len(SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase__ = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
43
0
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class a ( datasets.BuilderConfig ): snake_case_ = None class a ( datasets.ArrowBasedBuilder ): snake_case_ = PandasConfig def A_ ( self : Dict ): return datasets.DatasetInfo(features=self.config.features ) def A_ ( self : Union[str, Any] , lowercase_ : List[Any] ): if not self.config.data_files: raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" ) snake_case_ = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCamelCase_ , (str, list, tuple) ): snake_case_ = data_files if isinstance(UpperCamelCase_ , UpperCamelCase_ ): snake_case_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive snake_case_ = [dl_manager.iter_files(UpperCamelCase_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] snake_case_ = [] for split_name, files in data_files.items(): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): snake_case_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive snake_case_ = [dl_manager.iter_files(UpperCamelCase_ ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCamelCase_ , gen_kwargs={'''files''': files} ) ) return splits def A_ ( self : List[str] , lowercase_ : pa.Table ): if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example snake_case_ = table_cast(UpperCamelCase_ , self.config.features.arrow_schema ) return pa_table def A_ ( self : Tuple , lowercase_ : List[str] ): for i, file in enumerate(itertools.chain.from_iterable(UpperCamelCase_ ) ): with open(UpperCamelCase_ , '''rb''' ) as f: snake_case_ = pa.Table.from_pandas(pd.read_pickle(UpperCamelCase_ ) ) yield i, self._cast_table(UpperCamelCase_ )
640
import numpy as np def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
43
0
from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch("""socket.socket""" ) @patch("""builtins.open""" ) def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ): __magic_name__ : Tuple =Mock() __magic_name__ : str =conn, Mock() __magic_name__ : Optional[int] =iter([1, None] ) __magic_name__ : Dict =lambda lowerCamelCase : next(lowerCamelCase ) # ===== invoke ===== send_file(filename="""mytext.txt""" , testing=lowerCamelCase ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
21
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = '▁' lowerCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} lowerCAmelCase = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } lowerCAmelCase = {'vinai/bartpho-syllable': 1024} class _a ( UpperCamelCase__ ): _lowercase : Tuple = VOCAB_FILES_NAMES _lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP _lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Any = ['''input_ids''', '''attention_mask'''] def __init__( self: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[int] , UpperCamelCase_: Optional[Any]="<s>" , UpperCamelCase_: List[Any]="</s>" , UpperCamelCase_: Optional[int]="</s>" , UpperCamelCase_: List[str]="<s>" , UpperCamelCase_: Optional[int]="<unk>" , UpperCamelCase_: Optional[int]="<pad>" , UpperCamelCase_: Optional[int]="<mask>" , UpperCamelCase_: Optional[Dict[str, Any]] = None , **UpperCamelCase_: int , ) -> None: """simple docstring""" lowercase__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) lowercase__ = vocab_file lowercase__ = monolingual_vocab_file lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility lowercase__ = {} lowercase__ = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids: lowercase__ = cnt cnt += 1 with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): lowercase__ = line.strip().split()[0] lowercase__ = len(self.fairseq_tokens_to_ids ) if str(UpperCamelCase_ ) not in self.fairseq_tokens_to_ids: lowercase__ = len(self.fairseq_tokens_to_ids ) lowercase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self: Tuple ) -> int: """simple docstring""" lowercase__ = self.__dict__.copy() lowercase__ = None lowercase__ = self.sp_model.serialized_model_proto() return state def __setstate__( self: List[str] , UpperCamelCase_: int ) -> List[Any]: """simple docstring""" lowercase__ = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase__ = {} lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None , UpperCamelCase_: bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self: List[str] ) -> List[str]: """simple docstring""" return len(self.fairseq_ids_to_tokens ) def lowerCamelCase_ ( self: Union[str, Any] ) -> List[str]: """simple docstring""" lowercase__ = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self: int , UpperCamelCase_: str ) -> List[str]: """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Any ) -> Dict: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def lowerCamelCase_ ( self: str , UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.fairseq_ids_to_tokens[index] def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: int ) -> Dict: """simple docstring""" lowercase__ = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def lowerCamelCase_ ( self: Any , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__ = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: lowercase__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( UpperCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'{str(UpperCamelCase_ )} \n' ) return out_vocab_file, out_monolingual_vocab_file
43
0
'''simple docstring''' from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES __SCREAMING_SNAKE_CASE : int =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[str] =OrderedDict( [ # Base model mapping ('albert', 'FlaxAlbertModel'), ('bart', 'FlaxBartModel'), ('beit', 'FlaxBeitModel'), ('bert', 'FlaxBertModel'), ('big_bird', 'FlaxBigBirdModel'), ('blenderbot', 'FlaxBlenderbotModel'), ('blenderbot-small', 'FlaxBlenderbotSmallModel'), ('clip', 'FlaxCLIPModel'), ('distilbert', 'FlaxDistilBertModel'), ('electra', 'FlaxElectraModel'), ('gpt-sw3', 'FlaxGPT2Model'), ('gpt2', 'FlaxGPT2Model'), ('gpt_neo', 'FlaxGPTNeoModel'), ('gptj', 'FlaxGPTJModel'), ('longt5', 'FlaxLongT5Model'), ('marian', 'FlaxMarianModel'), ('mbart', 'FlaxMBartModel'), ('mt5', 'FlaxMT5Model'), ('opt', 'FlaxOPTModel'), ('pegasus', 'FlaxPegasusModel'), ('regnet', 'FlaxRegNetModel'), ('resnet', 'FlaxResNetModel'), ('roberta', 'FlaxRobertaModel'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'), ('roformer', 'FlaxRoFormerModel'), ('t5', 'FlaxT5Model'), ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'), ('vit', 'FlaxViTModel'), ('wav2vec2', 'FlaxWav2Vec2Model'), ('whisper', 'FlaxWhisperModel'), ('xglm', 'FlaxXGLMModel'), ('xlm-roberta', 'FlaxXLMRobertaModel'), ] ) __SCREAMING_SNAKE_CASE : Any =OrderedDict( [ # Model for pre-training mapping ('albert', 'FlaxAlbertForPreTraining'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForPreTraining'), ('big_bird', 'FlaxBigBirdForPreTraining'), ('electra', 'FlaxElectraForPreTraining'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('t5', 'FlaxT5ForConditionalGeneration'), ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __SCREAMING_SNAKE_CASE : Optional[int] =OrderedDict( [ # Model for Masked LM mapping ('albert', 'FlaxAlbertForMaskedLM'), ('bart', 'FlaxBartForConditionalGeneration'), ('bert', 'FlaxBertForMaskedLM'), ('big_bird', 'FlaxBigBirdForMaskedLM'), ('distilbert', 'FlaxDistilBertForMaskedLM'), ('electra', 'FlaxElectraForMaskedLM'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('roberta', 'FlaxRobertaForMaskedLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'), ('roformer', 'FlaxRoFormerForMaskedLM'), ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'), ] ) __SCREAMING_SNAKE_CASE : str =OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('bart', 'FlaxBartForConditionalGeneration'), ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'), ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'), ('encoder-decoder', 'FlaxEncoderDecoderModel'), ('longt5', 'FlaxLongT5ForConditionalGeneration'), ('marian', 'FlaxMarianMTModel'), ('mbart', 'FlaxMBartForConditionalGeneration'), ('mt5', 'FlaxMT5ForConditionalGeneration'), ('pegasus', 'FlaxPegasusForConditionalGeneration'), ('t5', 'FlaxT5ForConditionalGeneration'), ] ) __SCREAMING_SNAKE_CASE : Any =OrderedDict( [ # Model for Image-classsification ('beit', 'FlaxBeitForImageClassification'), ('regnet', 'FlaxRegNetForImageClassification'), ('resnet', 'FlaxResNetForImageClassification'), ('vit', 'FlaxViTForImageClassification'), ] ) __SCREAMING_SNAKE_CASE : Optional[Any] =OrderedDict( [ ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'), ] ) __SCREAMING_SNAKE_CASE : List[str] =OrderedDict( [ # Model for Causal LM mapping ('bart', 'FlaxBartForCausalLM'), ('bert', 'FlaxBertForCausalLM'), ('big_bird', 'FlaxBigBirdForCausalLM'), ('electra', 'FlaxElectraForCausalLM'), ('gpt-sw3', 'FlaxGPT2LMHeadModel'), ('gpt2', 'FlaxGPT2LMHeadModel'), ('gpt_neo', 'FlaxGPTNeoForCausalLM'), ('gptj', 'FlaxGPTJForCausalLM'), ('opt', 'FlaxOPTForCausalLM'), ('roberta', 'FlaxRobertaForCausalLM'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'), ('xglm', 'FlaxXGLMForCausalLM'), ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'), ] ) __SCREAMING_SNAKE_CASE : Optional[Any] =OrderedDict( [ # Model for Sequence Classification mapping ('albert', 'FlaxAlbertForSequenceClassification'), ('bart', 'FlaxBartForSequenceClassification'), ('bert', 'FlaxBertForSequenceClassification'), ('big_bird', 'FlaxBigBirdForSequenceClassification'), ('distilbert', 'FlaxDistilBertForSequenceClassification'), ('electra', 'FlaxElectraForSequenceClassification'), ('mbart', 'FlaxMBartForSequenceClassification'), ('roberta', 'FlaxRobertaForSequenceClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'), ('roformer', 'FlaxRoFormerForSequenceClassification'), ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'), ] ) __SCREAMING_SNAKE_CASE : str =OrderedDict( [ # Model for Question Answering mapping ('albert', 'FlaxAlbertForQuestionAnswering'), ('bart', 'FlaxBartForQuestionAnswering'), ('bert', 'FlaxBertForQuestionAnswering'), ('big_bird', 'FlaxBigBirdForQuestionAnswering'), ('distilbert', 'FlaxDistilBertForQuestionAnswering'), ('electra', 'FlaxElectraForQuestionAnswering'), ('mbart', 'FlaxMBartForQuestionAnswering'), ('roberta', 'FlaxRobertaForQuestionAnswering'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'), ('roformer', 'FlaxRoFormerForQuestionAnswering'), ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'), ] ) __SCREAMING_SNAKE_CASE : Optional[int] =OrderedDict( [ # Model for Token Classification mapping ('albert', 'FlaxAlbertForTokenClassification'), ('bert', 'FlaxBertForTokenClassification'), ('big_bird', 'FlaxBigBirdForTokenClassification'), ('distilbert', 'FlaxDistilBertForTokenClassification'), ('electra', 'FlaxElectraForTokenClassification'), ('roberta', 'FlaxRobertaForTokenClassification'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'), ('roformer', 'FlaxRoFormerForTokenClassification'), ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'), ] ) __SCREAMING_SNAKE_CASE : Optional[Any] =OrderedDict( [ # Model for Multiple Choice mapping ('albert', 'FlaxAlbertForMultipleChoice'), ('bert', 'FlaxBertForMultipleChoice'), ('big_bird', 'FlaxBigBirdForMultipleChoice'), ('distilbert', 'FlaxDistilBertForMultipleChoice'), ('electra', 'FlaxElectraForMultipleChoice'), ('roberta', 'FlaxRobertaForMultipleChoice'), ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'), ('roformer', 'FlaxRoFormerForMultipleChoice'), ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'), ] ) __SCREAMING_SNAKE_CASE : Tuple =OrderedDict( [ ('bert', 'FlaxBertForNextSentencePrediction'), ] ) __SCREAMING_SNAKE_CASE : Tuple =OrderedDict( [ ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'), ('whisper', 'FlaxWhisperForConditionalGeneration'), ] ) __SCREAMING_SNAKE_CASE : Any =OrderedDict( [ ('whisper', 'FlaxWhisperForAudioClassification'), ] ) __SCREAMING_SNAKE_CASE : List[str] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) __SCREAMING_SNAKE_CASE : Dict =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) __SCREAMING_SNAKE_CASE : Dict =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) __SCREAMING_SNAKE_CASE : int =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Dict =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : int =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) __SCREAMING_SNAKE_CASE : Optional[int] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) __SCREAMING_SNAKE_CASE : int =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Tuple =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Optional[int] =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Dict =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Any =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : List[Any] =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) __SCREAMING_SNAKE_CASE : Tuple =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : Dict = FLAX_MODEL_MAPPING __SCREAMING_SNAKE_CASE : str =auto_class_update(FlaxAutoModel) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING __SCREAMING_SNAKE_CASE : int =auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : str = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING __SCREAMING_SNAKE_CASE : Optional[int] =auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : str = FLAX_MODEL_FOR_MASKED_LM_MAPPING __SCREAMING_SNAKE_CASE : Union[str, Any] =auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : List[Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __SCREAMING_SNAKE_CASE : List[Any] =auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : str = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __SCREAMING_SNAKE_CASE : Dict =auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='sequence classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING __SCREAMING_SNAKE_CASE : Optional[Any] =auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING __SCREAMING_SNAKE_CASE : int =auto_class_update( FlaxAutoModelForTokenClassification, head_doc='token classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : str = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING __SCREAMING_SNAKE_CASE : Any =auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : Optional[Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING __SCREAMING_SNAKE_CASE : List[str] =auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __SCREAMING_SNAKE_CASE : Any =auto_class_update( FlaxAutoModelForImageClassification, head_doc='image classification' ) class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : int = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING __SCREAMING_SNAKE_CASE : Any =auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling') class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ): """simple docstring""" A__ : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING __SCREAMING_SNAKE_CASE : Union[str, Any] =auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling' )
135
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = original_name.split('''.''' )[0] lowercase__ = key.split('''.''' ) lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] ) lowercase__ = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] ) lowercase__ = orig_block_num - offset lowercase__ = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' ) return key def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = OrderedDict() lowercase__ , lowercase__ = 0, 0 for key, value in state_dict.items(): if key.startswith('''network''' ): lowercase__ = key.replace('''network''' , '''poolformer.encoder''' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('''bias''' ) and "patch_embed" not in key: patch_emb_offset += 1 lowercase__ = key[: key.find('''proj''' )] lowercase__ = key.replace(SCREAMING_SNAKE_CASE , f'patch_embeddings.{total_embed_found}.' ) lowercase__ = key.replace('''proj''' , '''projection''' ) if key.endswith('''bias''' ): total_embed_found += 1 if "patch_embeddings" in key: lowercase__ = '''poolformer.encoder.''' + key if "mlp.fc1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc1''' , '''output.conv1''' ) if "mlp.fc2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''mlp.fc2''' , '''output.conv2''' ) if "norm1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm1''' , '''before_norm''' ) if "norm2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''norm2''' , '''after_norm''' ) if "layer_scale_1" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_1''' , '''layer_scale_1''' ) if "layer_scale_2" in key: lowercase__ = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '''layer_scale_2''' , '''layer_scale_2''' ) if "head" in key: lowercase__ = key.replace('''head''' , '''classifier''' ) lowercase__ = value return new_state_dict def _a ( ): """simple docstring""" lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ) return image @torch.no_grad() def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = PoolFormerConfig() # set attributes based on model_name lowercase__ = '''huggingface/label-files''' lowercase__ = model_name[-3:] lowercase__ = 10_00 lowercase__ = '''imagenet-1k-id2label.json''' lowercase__ = (1, 10_00) # set config attributes lowercase__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} if size == "s12": lowercase__ = [2, 2, 6, 2] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 0.9 elif size == "s24": lowercase__ = [4, 4, 12, 4] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 0.9 elif size == "s36": lowercase__ = [6, 6, 18, 6] lowercase__ = [64, 1_28, 3_20, 5_12] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.9 elif size == "m36": lowercase__ = [6, 6, 18, 6] lowercase__ = [96, 1_92, 3_84, 7_68] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.95 elif size == "m48": lowercase__ = [8, 8, 24, 8] lowercase__ = [96, 1_92, 3_84, 7_68] lowercase__ = 4.0 lowercase__ = 1E-6 lowercase__ = 0.95 else: raise ValueError(f'Size {size} not supported' ) # load image processor lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE ) # Prepare image lowercase__ = prepare_img() lowercase__ = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values logger.info(f'Converting model {model_name}...' ) # load original state dict lowercase__ = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device('''cpu''' ) ) # rename keys lowercase__ = rename_keys(SCREAMING_SNAKE_CASE ) # create HuggingFace model and load state dict lowercase__ = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE ) model.load_state_dict(SCREAMING_SNAKE_CASE ) model.eval() # Define image processor lowercase__ = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE ) lowercase__ = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values # forward pass lowercase__ = model(SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits # define expected logit slices for different models if size == "s12": lowercase__ = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": lowercase__ = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": lowercase__ = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": lowercase__ = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": lowercase__ = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(f'Size {size} not supported' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 ) # finally, save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) model.save_pretrained(SCREAMING_SNAKE_CASE ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '--model_name', default='poolformer_s12', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) lowerCAmelCase = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
43
0
__A = 0 # The first color of the flag. __A = 1 # The second color of the flag. __A = 2 # The third color of the flag. __A = (red, white, blue) def lowerCAmelCase_ ( __a ) -> Dict: """simple docstring""" if not sequence: return [] if len(__a ) == 1: return list(__a ) lowerCamelCase__: Union[str, Any] =0 lowerCamelCase__: Tuple =len(__a ) - 1 lowerCamelCase__: Union[str, Any] =0 while mid <= high: if sequence[mid] == colors[0]: lowerCamelCase__ , lowerCamelCase__: Optional[Any] =sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: lowerCamelCase__ , lowerCamelCase__: Dict =sequence[high], sequence[mid] high -= 1 else: lowerCamelCase__: Any =F"""The elements inside the sequence must contains only {colors} values""" raise ValueError(__a ) return sequence if __name__ == "__main__": import doctest doctest.testmod() __A = input("Enter numbers separated by commas:\n").strip() __A = [int(item.strip()) for item in user_input.split(",")] print(f'{dutch_national_flag_sort(unsorted)}')
59
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowerCAmelCase = logging.getLogger() def _a ( ): """simple docstring""" lowercase__ = argparse.ArgumentParser() parser.add_argument('''-f''' ) lowercase__ = parser.parse_args() return args.f def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = {} lowercase__ = os.path.join(SCREAMING_SNAKE_CASE , '''all_results.json''' ) if os.path.exists(SCREAMING_SNAKE_CASE ): with open(SCREAMING_SNAKE_CASE , '''r''' ) as f: lowercase__ = json.load(SCREAMING_SNAKE_CASE ) else: raise ValueError(f'can\'t find {path}' ) return results def _a ( ): """simple docstring""" lowercase__ = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _a ( UpperCamelCase__ ): @classmethod def lowerCamelCase_ ( cls: int ) -> Any: """simple docstring""" lowercase__ = tempfile.mkdtemp() lowercase__ = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) lowercase__ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def lowerCamelCase_ ( cls: Optional[Any] ) -> Dict: """simple docstring""" shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertLess(result['''perplexity'''] , 100 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Dict ) -> Optional[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertLess(result['''perplexity'''] , 42 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" lowercase__ = 7 if get_gpu_count() > 1 else 2 lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> int: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['''eval_f1'''] , 28 ) self.assertGreaterEqual(result['''eval_exact'''] , 28 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: int ) -> str: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_rouge1'''] , 10 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> List[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_bleu'''] , 30 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''translation_no_trainer''' ) ) ) @slow def lowerCamelCase_ ( self: Optional[int] ) -> Dict: """simple docstring""" lowercase__ = logging.StreamHandler(sys.stdout ) logger.addHandler(UpperCamelCase_ ) lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split() run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) lowercase__ = get_results(UpperCamelCase_ ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , '''image_classification_no_trainer''' ) ) )
43
0
import math def a ( lowerCamelCase_ ): '''simple docstring''' return math.sqrt(lowerCamelCase_ ) * math.sqrt(lowerCamelCase_ ) == num def a ( lowerCamelCase_ ): '''simple docstring''' lowercase__ = 0 lowercase__ = n while left <= right: lowercase__ = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: lowercase__ = mid - 1 else: lowercase__ = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
183
from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = 'T5Config' class _a ( UpperCamelCase__ ): _lowercase : Optional[int] = '''mt5''' _lowercase : str = MTaConfig class _a ( UpperCamelCase__ ): _lowercase : Optional[Any] = '''mt5''' _lowercase : Optional[Any] = MTaConfig class _a ( UpperCamelCase__ ): _lowercase : Tuple = '''mt5''' _lowercase : Optional[Any] = MTaConfig
43
0