Dataset schema (one row = five fields, in this order):

  column                   type    range
  -----------------------  ------  ---------------------
  code                     string  lengths 82 - 54.1k
  code_codestyle           int64   0 - 699
  style_context            string  lengths 111 - 35.6k
  style_context_codestyle  int64   0 - 699
  label                    int64   0 - 1
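The rows below repeat that five-field layout: a code sample, its style id, a style-context sample, the context's style id, and a 0/1 label. A minimal sketch of how a dump with this schema could be consumed with the `datasets` library; the dataset path "user/code-style-dataset" is a hypothetical placeholder, not this dump's real identifier:

    # Minimal sketch, assuming the five-column schema above.
    # "user/code-style-dataset" is a hypothetical name, not the real one.
    from datasets import load_dataset

    ds = load_dataset("user/code-style-dataset", split="train")
    row = ds[0]
    print(len(row["code"]), row["code_codestyle"])                  # code sample and its style id (0-699)
    print(len(row["style_context"]), row["style_context_codestyle"])  # context sample and its style id
    print(row["label"])                                             # binary label (0 or 1)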
# Benchmark an exported ONNX model with ONNX Runtime's TensorRT execution provider.
import os
import time

import numpy as np
import onnxruntime as ort

# NOTE: the assignment targets for these three "1"/"0"/"1" flags were lost in the
# obfuscated dump; ORT's TensorRT INT8 environment flags are a plausible reconstruction.
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"  # Enable INT8 precision
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"  # Use ORT calibration table
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"  # Enable engine caching

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
pred = {}
for _ in range(max_iters):
    pred = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
41
# Convert an original Donut checkpoint into the Hugging Face VisionEncoderDecoder format.
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # Split the fused qkv projection into separate query/key/value tensors.
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
6
0
# Tokenization tests for ALBERT (the huge integration arrays that follow are kept as-is).
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé."
) self.assertListEqual( SCREAMING_SNAKE_CASE_ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) lowerCamelCase_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) lowerCamelCase_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , ) def UpperCamelCase( self ) -> str: '''simple docstring''' lowerCamelCase_ = AlbertTokenizer(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.encode('sequence builders' ) lowerCamelCase_ = tokenizer.encode('multi-sequence build' ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def UpperCamelCase( self ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
42
# Integration tests for the k-diffusion Stable Diffusion pipeline in diffusers.
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
6
0
# RoFormer model configuration and ONNX export configuration.
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
43
# Project Euler problem 3: find the largest prime factor of a number.
def solution(n: int = 600_851_475_143) -> int:
    """Returns the largest prime factor of n, by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
6
0
# Registry of the `datasets` library's packaged builder modules, hashed for caching.
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
44
# Unit tests for diffusers' is_safetensors_compatible helper.
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
6
0
# Tests for the `datasets` caching/download utilities (cached_path and friends).
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
45
# Zeller's congruence: compute the day of the week for a mm-dd-yyyy / mm/dd/yyyy date.
import argparse
import datetime


def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument("date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)")
    args = parser.parse_args()
    zeller(args.date_input)
6
0
"""simple docstring""" from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class A_ ( _a , _a ): lowerCAmelCase__ = 'pixel_values' lowerCAmelCase__ = False lowerCAmelCase__ = TimmBackboneConfig def __init__( self: int ,__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Tuple ): '''simple docstring''' requires_backends(self ,"timm" ) super().__init__(__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = config if config.backbone is None: raise ValueError("backbone is not set in the config. Please set it to a timm model name." ) if config.backbone not in timm.list_models(): raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" ) if hasattr(__lowerCAmelCase ,"out_features" ) and config.out_features is not None: raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." ) _lowerCamelCase : Union[str, Any] = getattr(__lowerCAmelCase ,"use_pretrained_backbone" ,__lowerCAmelCase ) if pretrained is None: raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." ) # We just take the final layer by default. This matches the default for the transformers models. _lowerCamelCase : int = config.out_indices if getattr(__lowerCAmelCase ,"out_indices" ,__lowerCAmelCase ) is not None else (-1,) _lowerCamelCase : List[Any] = timm.create_model( config.backbone ,pretrained=__lowerCAmelCase ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=__lowerCAmelCase ,**__lowerCAmelCase ,) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
_lowerCamelCase : Tuple = self._backbone.return_layers _lowerCamelCase : Tuple = {layer["module"]: str(__lowerCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(__lowerCAmelCase ) @classmethod def _lowercase ( cls: Union[str, Any] ,__lowerCAmelCase: Tuple ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' requires_backends(cls ,["vision", "timm"] ) from ...models.timm_backbone import TimmBackboneConfig _lowerCamelCase : List[Any] = kwargs.pop("config" ,TimmBackboneConfig() ) _lowerCamelCase : Tuple = kwargs.pop("use_timm_backbone" ,__lowerCAmelCase ) if not use_timm: raise ValueError("use_timm_backbone must be True for timm backbones" ) _lowerCamelCase : str = kwargs.pop("num_channels" ,config.num_channels ) _lowerCamelCase : int = kwargs.pop("features_only" ,config.features_only ) _lowerCamelCase : Dict = kwargs.pop("use_pretrained_backbone" ,config.use_pretrained_backbone ) _lowerCamelCase : List[Any] = kwargs.pop("out_indices" ,config.out_indices ) _lowerCamelCase : Tuple = TimmBackboneConfig( backbone=__lowerCAmelCase ,num_channels=__lowerCAmelCase ,features_only=__lowerCAmelCase ,use_pretrained_backbone=__lowerCAmelCase ,out_indices=__lowerCAmelCase ,) return super()._from_config(__lowerCAmelCase ,**__lowerCAmelCase ) def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str] ): '''simple docstring''' pass def _lowercase ( self: List[str] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: List[str]=None ,**__lowerCAmelCase: Tuple ): '''simple docstring''' _lowerCamelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict _lowerCamelCase : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _lowerCamelCase : List[Any] = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("Cannot output attentions for timm backbones at the moment" ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone _lowerCamelCase : Union[str, Any] = self._all_layers _lowerCamelCase : Tuple = self._backbone(__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : List[Any] = self._return_layers _lowerCamelCase : List[Any] = tuple(hidden_states[i] for i in self.out_indices ) else: _lowerCamelCase : Dict = self._backbone(__lowerCAmelCase ,**__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : List[str] = tuple(__lowerCAmelCase ) _lowerCamelCase : List[Any] = tuple(__lowerCAmelCase ) if hidden_states is not None else None if not return_dict: _lowerCamelCase : Any = (feature_maps,) if output_hidden_states: _lowerCamelCase : Any = output + (hidden_states,) return output return BackboneOutput(feature_maps=__lowerCAmelCase ,hidden_states=__lowerCAmelCase ,attentions=__lowerCAmelCase )
46
# Count token occurrences in a binarized dataset, for smoothing MLM masking probabilities.
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
6
0
# Byte-level BPE tokenizer for LED (adapted from the BART tokenizer).
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
47
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
6
0
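The tokenizer record above builds pair inputs in the RoBERTa style, `<s> A </s></s> B </s>`. A self-contained sketch of that layout and its special-tokens mask, using toy ids (cls=0, sep=2) rather than any real vocabulary:

# Minimal sketch of the RoBERTa-style layout used above; the ids are toy
# values (CLS=0, SEP=2), not taken from an actual vocabulary file.
CLS, SEP = 0, 2

def build_inputs(ids_a, ids_b=None):
    # <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP, SEP] + ids_b + [SEP]

def special_tokens_mask(ids_a, ids_b=None):
    # 1 marks a special token, 0 marks a regular token
    if ids_b is None:
        return [1] + [0] * len(ids_a) + [1]
    return [1] + [0] * len(ids_a) + [1, 1] + [0] * len(ids_b) + [1]

print(build_inputs([11, 12], [21]))         # [0, 11, 12, 2, 2, 21, 2]
print(special_tokens_mask([11, 12], [21]))  # [1, 0, 0, 1, 1, 0, 1]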
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class A ( SCREAMING_SNAKE_CASE__ ): def __get__( self : List[Any] , __magic_name__ : Dict , __magic_name__ : Union[str, Any]=None ): """simple docstring""" if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute" ) lowerCAmelCase__ = "__cached_" + self.fget.__name__ lowerCAmelCase__ = getattr(__magic_name__ , __magic_name__ , __magic_name__ ) if cached is None: lowerCAmelCase__ = self.fget(__magic_name__ ) setattr(__magic_name__ , __magic_name__ , __magic_name__ ) return cached def A ( UpperCamelCase_ : List[str] ) -> str: '''simple docstring''' lowerCAmelCase__ = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"""invalid truth value {val!r}""" ) def A ( UpperCamelCase_ : List[Any] ) -> Any: '''simple docstring''' if is_torch_fx_proxy(UpperCamelCase_ ): return True if is_torch_available(): import torch if isinstance(UpperCamelCase_ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(UpperCamelCase_ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(UpperCamelCase_ , (jnp.ndarray, Tracer) ): return True return isinstance(UpperCamelCase_ , np.ndarray ) def A ( UpperCamelCase_ : Any ) -> Any: '''simple docstring''' return isinstance(UpperCamelCase_ , np.ndarray ) def A ( UpperCamelCase_ : str ) -> Union[str, Any]: '''simple docstring''' return _is_numpy(UpperCamelCase_ ) def A ( UpperCamelCase_ : Union[str, Any] ) -> Dict: '''simple docstring''' import torch return isinstance(UpperCamelCase_ , torch.Tensor ) def A ( UpperCamelCase_ : str ) -> Optional[Any]: '''simple docstring''' return False if not is_torch_available() else _is_torch(UpperCamelCase_ ) def A ( UpperCamelCase_ : Union[str, Any] ) -> str: '''simple docstring''' import torch return isinstance(UpperCamelCase_ , torch.device ) def A ( UpperCamelCase_ : Any ) -> str: '''simple docstring''' return False if not is_torch_available() else _is_torch_device(UpperCamelCase_ ) def A ( UpperCamelCase_ : Optional[Any] ) -> List[str]: '''simple docstring''' import torch if isinstance(UpperCamelCase_ , UpperCamelCase_ ): if hasattr(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase__ = getattr(UpperCamelCase_ , UpperCamelCase_ ) else: return False return isinstance(UpperCamelCase_ , torch.dtype ) def A ( UpperCamelCase_ : int ) -> Any: '''simple docstring''' return False if not is_torch_available() else _is_torch_dtype(UpperCamelCase_ ) def A ( UpperCamelCase_ : str ) -> Tuple: '''simple docstring''' import tensorflow as tf return isinstance(UpperCamelCase_ , tf.Tensor ) def A ( UpperCamelCase_ : Union[str, Any] ) -> List[str]: '''simple docstring''' return False if not is_tf_available() else _is_tensorflow(UpperCamelCase_ ) def A ( UpperCamelCase_ : Optional[int] ) -> str: '''simple docstring''' import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(UpperCamelCase_ , "is_symbolic_tensor" ): return 
tf.is_symbolic_tensor(UpperCamelCase_ ) return type(UpperCamelCase_ ) == tf.Tensor def A ( UpperCamelCase_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' return False if not is_tf_available() else _is_tf_symbolic_tensor(UpperCamelCase_ ) def A ( UpperCamelCase_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' import jax.numpy as jnp # noqa: F811 return isinstance(UpperCamelCase_ , jnp.ndarray ) def A ( UpperCamelCase_ : Union[str, Any] ) -> str: '''simple docstring''' return False if not is_flax_available() else _is_jax(UpperCamelCase_ ) def A ( UpperCamelCase_ : List[str] ) -> Tuple: '''simple docstring''' if isinstance(UpperCamelCase_ , (dict, UserDict) ): return {k: to_py_obj(UpperCamelCase_ ) for k, v in obj.items()} elif isinstance(UpperCamelCase_ , (list, tuple) ): return [to_py_obj(UpperCamelCase_ ) for o in obj] elif is_tf_tensor(UpperCamelCase_ ): return obj.numpy().tolist() elif is_torch_tensor(UpperCamelCase_ ): return obj.detach().cpu().tolist() elif is_jax_tensor(UpperCamelCase_ ): return np.asarray(UpperCamelCase_ ).tolist() elif isinstance(UpperCamelCase_ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def A ( UpperCamelCase_ : Tuple ) -> Optional[Any]: '''simple docstring''' if isinstance(UpperCamelCase_ , (dict, UserDict) ): return {k: to_numpy(UpperCamelCase_ ) for k, v in obj.items()} elif isinstance(UpperCamelCase_ , (list, tuple) ): return np.array(UpperCamelCase_ ) elif is_tf_tensor(UpperCamelCase_ ): return obj.numpy() elif is_torch_tensor(UpperCamelCase_ ): return obj.detach().cpu().numpy() elif is_jax_tensor(UpperCamelCase_ ): return np.asarray(UpperCamelCase_ ) else: return obj class A ( SCREAMING_SNAKE_CASE__ ): def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = fields(self ) # Safety and consistency checks if not len(__magic_name__ ): raise ValueError(f"""{self.__class__.__name__} has no fields.""" ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" ) lowerCAmelCase__ = getattr(self , class_fields[0].name ) lowerCAmelCase__ = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(__magic_name__ ): if isinstance(__magic_name__ , __magic_name__ ): lowerCAmelCase__ = first_field.items() lowerCAmelCase__ = True else: try: lowerCAmelCase__ = iter(__magic_name__ ) lowerCAmelCase__ = True except TypeError: lowerCAmelCase__ = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__magic_name__ ): if ( not isinstance(__magic_name__ , (list, tuple) ) or not len(__magic_name__ ) == 2 or not isinstance(element[0] , __magic_name__ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute lowerCAmelCase__ = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f"""Cannot set key/value for {element}. 
It needs to be a tuple (key, value).""" ) break setattr(self , element[0] , element[1] ) if element[1] is not None: lowerCAmelCase__ = element[1] elif first_field is not None: lowerCAmelCase__ = first_field else: for field in class_fields: lowerCAmelCase__ = getattr(self , field.name ) if v is not None: lowerCAmelCase__ = v def __delitem__( self : Union[str, Any] , *__magic_name__ : Tuple , **__magic_name__ : Dict ): """simple docstring""" raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" ) def __SCREAMING_SNAKE_CASE ( self : Tuple , *__magic_name__ : Tuple , **__magic_name__ : Dict ): """simple docstring""" raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" ) def __SCREAMING_SNAKE_CASE ( self : int , *__magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ): """simple docstring""" raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" ) def __SCREAMING_SNAKE_CASE ( self : List[Any] , *__magic_name__ : List[Any] , **__magic_name__ : List[Any] ): """simple docstring""" raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" ) def __getitem__( self : Union[str, Any] , __magic_name__ : Optional[int] ): """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): lowerCAmelCase__ = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__magic_name__ , __magic_name__ ) super().__setattr__(__magic_name__ , __magic_name__ ) def __setitem__( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[Any] ): """simple docstring""" super().__setitem__(__magic_name__ , __magic_name__ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__magic_name__ , __magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" return tuple(self[k] for k in self.keys() ) class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): @classmethod def __SCREAMING_SNAKE_CASE ( cls : int , __magic_name__ : Optional[int] ): """simple docstring""" raise ValueError( f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" ) class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :int = 'longest' snake_case__ :Optional[Any] = 'max_length' snake_case__ :str = 'do_not_pad' class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Any = 'pt' snake_case__ :Union[str, Any] = 'tf' snake_case__ :Any = 'np' snake_case__ :List[str] = 'jax' class A : def __init__( self : Dict , __magic_name__ : List[ContextManager] ): """simple docstring""" lowerCAmelCase__ = context_managers lowerCAmelCase__ = ExitStack() def __enter__( self : str ): """simple docstring""" for context_manager in self.context_managers: self.stack.enter_context(__magic_name__ ) def __exit__( self : Dict , *__magic_name__ : Dict , **__magic_name__ : int ): """simple docstring""" self.stack.__exit__(*__magic_name__ , **__magic_name__ ) def A ( UpperCamelCase_ : Dict ) -> List[str]: '''simple docstring''' lowerCAmelCase__ = infer_framework(UpperCamelCase_ ) if framework == "tf": lowerCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": lowerCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch 
models else: lowerCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def A ( UpperCamelCase_ : int ) -> int: '''simple docstring''' lowerCAmelCase__ = model_class.__name__ lowerCAmelCase__ = infer_framework(UpperCamelCase_ ) if framework == "tf": lowerCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": lowerCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch models else: lowerCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def A ( UpperCamelCase_ : MutableMapping , UpperCamelCase_ : str = "" , UpperCamelCase_ : str = "." ) -> List[Any]: '''simple docstring''' def _flatten_dict(UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]="" , UpperCamelCase_ : Any="." ): for k, v in d.items(): lowerCAmelCase__ = str(UpperCamelCase_ ) + delimiter + str(UpperCamelCase_ ) if parent_key else k if v and isinstance(UpperCamelCase_ , UpperCamelCase_ ): yield from flatten_dict(UpperCamelCase_ , UpperCamelCase_ , delimiter=UpperCamelCase_ ).items() else: yield key, v return dict(_flatten_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ) @contextmanager def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : bool = False ) -> Any: '''simple docstring''' if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any]=None ) -> Tuple: '''simple docstring''' if is_numpy_array(UpperCamelCase_ ): return np.transpose(UpperCamelCase_ , axes=UpperCamelCase_ ) elif is_torch_tensor(UpperCamelCase_ ): return array.T if axes is None else array.permute(*UpperCamelCase_ ) elif is_tf_tensor(UpperCamelCase_ ): import tensorflow as tf return tf.transpose(UpperCamelCase_ , perm=UpperCamelCase_ ) elif is_jax_tensor(UpperCamelCase_ ): return jnp.transpose(UpperCamelCase_ , axes=UpperCamelCase_ ) else: raise ValueError(F"""Type not supported for transpose: {type(UpperCamelCase_ )}.""" ) def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> Dict: '''simple docstring''' if is_numpy_array(UpperCamelCase_ ): return np.reshape(UpperCamelCase_ , UpperCamelCase_ ) elif is_torch_tensor(UpperCamelCase_ ): return array.reshape(*UpperCamelCase_ ) elif is_tf_tensor(UpperCamelCase_ ): import tensorflow as tf return tf.reshape(UpperCamelCase_ , UpperCamelCase_ ) elif is_jax_tensor(UpperCamelCase_ ): return jnp.reshape(UpperCamelCase_ , UpperCamelCase_ ) else: raise ValueError(F"""Type not supported for reshape: {type(UpperCamelCase_ )}.""" ) def A ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str]=None ) -> Optional[int]: '''simple docstring''' if is_numpy_array(UpperCamelCase_ ): return np.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ ) elif is_torch_tensor(UpperCamelCase_ ): return array.squeeze() if axis is None else array.squeeze(dim=UpperCamelCase_ ) elif is_tf_tensor(UpperCamelCase_ ): import tensorflow as tf return tf.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ ) elif is_jax_tensor(UpperCamelCase_ ): return jnp.squeeze(UpperCamelCase_ , axis=UpperCamelCase_ ) else: raise ValueError(F"""Type not supported for squeeze: {type(UpperCamelCase_ )}.""" ) def A ( UpperCamelCase_ : 
Optional[int] , UpperCamelCase_ : Tuple ) -> List[Any]: '''simple docstring''' if is_numpy_array(UpperCamelCase_ ): return np.expand_dims(UpperCamelCase_ , UpperCamelCase_ ) elif is_torch_tensor(UpperCamelCase_ ): return array.unsqueeze(dim=UpperCamelCase_ ) elif is_tf_tensor(UpperCamelCase_ ): import tensorflow as tf return tf.expand_dims(UpperCamelCase_ , axis=UpperCamelCase_ ) elif is_jax_tensor(UpperCamelCase_ ): return jnp.expand_dims(UpperCamelCase_ , axis=UpperCamelCase_ ) else: raise ValueError(F"""Type not supported for expand_dims: {type(UpperCamelCase_ )}.""" ) def A ( UpperCamelCase_ : List[str] ) -> Dict: '''simple docstring''' if is_numpy_array(UpperCamelCase_ ): return np.size(UpperCamelCase_ ) elif is_torch_tensor(UpperCamelCase_ ): return array.numel() elif is_tf_tensor(UpperCamelCase_ ): import tensorflow as tf return tf.size(UpperCamelCase_ ) elif is_jax_tensor(UpperCamelCase_ ): return array.size else: raise ValueError(F"""Type not supported for expand_dims: {type(UpperCamelCase_ )}.""" ) def A ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] ) -> Dict: '''simple docstring''' for key, value in auto_map.items(): if isinstance(UpperCamelCase_ , (tuple, list) ): lowerCAmelCase__ = [F"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value] elif value is not None and "--" not in value: lowerCAmelCase__ = F"""{repo_id}--{value}""" return auto_map def A ( UpperCamelCase_ : str ) -> Any: '''simple docstring''' for base_class in inspect.getmro(UpperCamelCase_ ): lowerCAmelCase__ = base_class.__module__ lowerCAmelCase__ = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"""Could not infer framework from class {model_class}.""" )
48
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = ["image_processor", "tokenizer"] lowerCamelCase_ = "OwlViTImageProcessor" lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __A , ) SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" ) SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__A , __A ) def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple: """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )): SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )] elif isinstance(__A , __A ) and isinstance(text[0] , __A ): SCREAMING_SNAKE_CASE__ = [] # Maximum number of queries across batch SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__A ) != max_num_queries: SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A )) SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A ) encodings.append(__A ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) SCREAMING_SNAKE_CASE__ = BatchEncoding() SCREAMING_SNAKE_CASE__ = input_ids SCREAMING_SNAKE_CASE__ = attention_mask if 
query_images is not None: SCREAMING_SNAKE_CASE__ = BatchEncoding() SCREAMING_SNAKE_CASE__ = self.image_processor( __A , return_tensors=__A , **__A ).pixel_values SCREAMING_SNAKE_CASE__ = query_pixel_values if images is not None: SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif query_images is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def _snake_case ( self :List[Any] , *__A :Dict , **__A :Dict ) -> Optional[int]: """simple docstring""" return self.image_processor.post_process(*__A , **__A ) def _snake_case ( self :Optional[int] , *__A :Dict , **__A :List[str] ) -> Optional[Any]: """simple docstring""" return self.image_processor.post_process_object_detection(*__A , **__A ) def _snake_case ( self :str , *__A :List[str] , **__A :Union[str, Any] ) -> Any: """simple docstring""" return self.image_processor.post_process_image_guided_detection(*__A , **__A ) def _snake_case ( self :Dict , *__A :List[str] , **__A :List[str] ) -> int: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _snake_case ( self :Dict , *__A :Dict , **__A :List[str] ) -> str: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _snake_case ( self :List[Any] ) -> Optional[int]: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , ) return self.image_processor_class @property def _snake_case ( self :Any ) -> Optional[Any]: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , ) return self.image_processor
6
0
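The utilities record above includes a recursive nested-dict flattener. A minimal standalone sketch of the same pattern (the function name and delimiter default here are illustrative, not the exact library API):

from collections.abc import MutableMapping

# Standalone sketch of the nested-dict flattening pattern from the record above.
def flatten_dict(d, parent_key="", delimiter="."):
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else k
        if isinstance(v, MutableMapping) and v:
            # Recurse into non-empty mappings, joining keys with the delimiter.
            items.update(flatten_dict(v, key, delimiter))
        else:
            items[key] = v
    return items

print(flatten_dict({"a": {"b": 1, "c": {"d": 2}}, "e": 3}))
# {'a.b': 1, 'a.c.d': 2, 'e': 3}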
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase : Optional[Any] = { 'configuration_upernet': ['UperNetConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = [ 'UperNetForSemanticSegmentation', 'UperNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys _lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = 42 class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ): @register_to_config def __init__( self :Union[str, Any] , __A :int = 3 , __A :int = 3 , __A :Tuple[str] = ("DownEncoderBlock2D",) , __A :Tuple[str] = ("UpDecoderBlock2D",) , __A :Tuple[int] = (64,) , __A :int = 1 , __A :str = "silu" , __A :int = 3 , __A :int = 32 , __A :int = 256 , __A :int = 32 , __A :Optional[int] = None , __A :float = 0.1_8_2_1_5 , __A :str = "group" , ) -> Any: """simple docstring""" super().__init__() # pass init params to Encoder SCREAMING_SNAKE_CASE__ = Encoder( in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , ) SCREAMING_SNAKE_CASE__ = vq_embed_dim if vq_embed_dim is not None else latent_channels SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = VectorQuantizer(__A , __A , beta=0.2_5 , remap=__A , sane_index_shape=__A ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) # pass init params to Decoder SCREAMING_SNAKE_CASE__ = Decoder( in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , ) @apply_forward_hook def _snake_case ( self :Union[str, Any] , __A :torch.FloatTensor , __A :bool = True ) -> VQEncoderOutput: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.encoder(__A ) SCREAMING_SNAKE_CASE__ = self.quant_conv(__A ) if not return_dict: return (h,) return VQEncoderOutput(latents=__A ) @apply_forward_hook def _snake_case ( self :Tuple , __A :torch.FloatTensor , __A :bool = False , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" if not force_not_quantize: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.quantize(__A ) else: SCREAMING_SNAKE_CASE__ = h SCREAMING_SNAKE_CASE__ = self.post_quant_conv(__A ) SCREAMING_SNAKE_CASE__ = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=__A ) def _snake_case ( self :int , __A :torch.FloatTensor , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" SCREAMING_SNAKE_CASE__ = sample SCREAMING_SNAKE_CASE__ = self.encode(__A ).latents SCREAMING_SNAKE_CASE__ = self.decode(__A ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__A )
6
0
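The `__init__` records above route imports through `_LazyModule` so heavy submodules only load on first use. A minimal sketch of that deferred-import idea using PEP 562 module-level `__getattr__` (the package and attribute names are illustrative, not the library's internals):

# mypkg/__init__.py -- sketch of the lazy-import idea behind _LazyModule,
# using PEP 562 module-level __getattr__ (names illustrative).
import importlib

_import_structure = {"configuration_upernet": ["UperNetConfig"]}

# Invert the mapping so each public attribute points at its submodule.
_attr_to_module = {
    attr: mod for mod, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    # Import the submodule only when one of its attributes is first accessed.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")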
'''simple docstring'''
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    # All eight knight moves from the current square, filtered to the board.
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    # The tour is complete once every square has been numbered.
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # Backtrack: un-mark the square if this branch failed.
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
50
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = 42 lowerCamelCase_ = jnp.floataa lowerCamelCase_ = True def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" super().setup() SCREAMING_SNAKE_CASE__ = nn.Dense(5 , dtype=self.dtype ) def __call__( self :List[Any] , *__A :int , **__A :Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super().__call__(*__A , **__A ) SCREAMING_SNAKE_CASE__ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): def cross_entropy(UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str]=None ): SCREAMING_SNAKE_CASE__ = logits.shape[-1] SCREAMING_SNAKE_CASE__ = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" ) SCREAMING_SNAKE_CASE__ = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 ) SCREAMING_SNAKE_CASE__ = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: SCREAMING_SNAKE_CASE__ = reduction(UpperCamelCase__ ) return loss SCREAMING_SNAKE_CASE__ = partial(UpperCamelCase__ , reduction=jnp.mean ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class UpperCamelCase_ : lowerCamelCase_ = "google/bigbird-roberta-base" lowerCamelCase_ = 30_00 lowerCamelCase_ = 1_05_00 lowerCamelCase_ = 1_28 lowerCamelCase_ = 3 lowerCamelCase_ = 1 lowerCamelCase_ = 5 # tx_args lowerCamelCase_ = 3e-5 lowerCamelCase_ = 0.0 lowerCamelCase_ = 2_00_00 lowerCamelCase_ = 0.0095 lowerCamelCase_ = "bigbird-roberta-natural-questions" lowerCamelCase_ = "training-expt" lowerCamelCase_ = "data/nq-training.jsonl" lowerCamelCase_ = "data/nq-validation.jsonl" def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" os.makedirs(self.base_dir , exist_ok=__A ) SCREAMING_SNAKE_CASE__ = os.path.join(self.base_dir , self.save_dir ) SCREAMING_SNAKE_CASE__ = self.batch_size_per_device * jax.device_count() @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 40_96 # no dynamic padding on TPUs def __call__( self :Optional[Any] , __A :Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.collate_fn(__A ) SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(__A , __A ) return batch def _snake_case ( self :List[Any] , __A :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.fetch_inputs(features["""input_ids"""] ) SCREAMING_SNAKE_CASE__ = { 
"""input_ids""": jnp.array(__A , dtype=jnp.intaa ), """attention_mask""": jnp.array(__A , dtype=jnp.intaa ), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ), } return batch def _snake_case ( self :Tuple , __A :list ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self._fetch_inputs(__A ) for ids in input_ids] return zip(*__A ) def _snake_case ( self :List[str] , __A :list ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [1 for _ in range(len(__A ) )] while len(__A ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any]=None ): if seed is not None: SCREAMING_SNAKE_CASE__ = dataset.shuffle(seed=UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) // batch_size ): SCREAMING_SNAKE_CASE__ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(UpperCamelCase__ ) @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] , **UpperCamelCase__: Optional[int] ): def loss_fn(UpperCamelCase__: List[Any] ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs return state.loss_fn( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = jax.random.split(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.value_and_grad(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = grad_fn(state.params ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean(UpperCamelCase__ , """batch""" ) SCREAMING_SNAKE_CASE__ = state.apply_gradients(grads=UpperCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , **UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs SCREAMING_SNAKE_CASE__ = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class UpperCamelCase_ ( train_state.TrainState ): lowerCamelCase_ = struct.field(pytree_node=UpperCamelCase__ ) @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = None def _snake_case ( self :List[Any] , __A :str , __A :str , __A :str , __A 
:Tuple=None ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = model.params SCREAMING_SNAKE_CASE__ = TrainState.create( apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , ) if ckpt_dir is not None: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = restore_checkpoint(__A , __A ) SCREAMING_SNAKE_CASE__ = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = build_tx(**__A ) SCREAMING_SNAKE_CASE__ = train_state.TrainState( step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , ) SCREAMING_SNAKE_CASE__ = args SCREAMING_SNAKE_CASE__ = data_collator SCREAMING_SNAKE_CASE__ = lr SCREAMING_SNAKE_CASE__ = params SCREAMING_SNAKE_CASE__ = jax_utils.replicate(__A ) return state def _snake_case ( self :Optional[Any] , __A :Optional[int] , __A :int , __A :int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.args SCREAMING_SNAKE_CASE__ = len(__A ) // args.batch_size SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ = jax.random.split(__A , jax.device_count() ) for epoch in range(args.max_epochs ): SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , args.batch_size , seed=__A ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc=f'''Running EPOCH-{epoch}''' ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.train_step_fn(__A , __A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 if i % args.logging_steps == 0: SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(state.step ) SCREAMING_SNAKE_CASE__ = running_loss.item() / i SCREAMING_SNAKE_CASE__ = self.scheduler_fn(state_step - 1 ) SCREAMING_SNAKE_CASE__ = self.evaluate(__A , __A ) SCREAMING_SNAKE_CASE__ = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(__A ) ) self.logger.log(__A , commit=__A ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__A ) def _snake_case ( self :List[str] , __A :Dict , __A :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , self.args.batch_size ) SCREAMING_SNAKE_CASE__ = len(__A ) // self.args.batch_size SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ = self.val_step_fn(__A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 return running_loss / i def _snake_case ( self :List[Any] , __A :Any , __A :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(__A ) print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... 
""" ) self.model_save_fn(__A , params=state.params ) with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) ) joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) ) with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f: json.dump({"""step""": state.step.item()} , __A ) print("""DONE""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ ) with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.params , f.read() ) with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.opt_state , f.read() ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) ) with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f: SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = num_train_steps - warmup_steps SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ): def weight_decay_mask(UpperCamelCase__: Any ): SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ ) return tx, lr
6
0
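Assuming the corrected knight's-tour solver above is in scope, a quick sanity check (a 5x5 board admits an open tour; a 4x4 board does not):

# Usage sketch for the backtracking solver above (assumes open_knight_tour
# from that record is importable).
board = open_knight_tour(5)
for row in board:
    print(row)

try:
    open_knight_tour(4)
except ValueError as err:
    print(err)  # Open Knight Tour cannot be performed on a board of size 4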
'''simple docstring''' import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def __snake_case ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple=7 ) -> Dict: """simple docstring""" UpperCAmelCase = None if token is not None: UpperCAmelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"Bearer {token}"} # The id of a workflow (not of a workflow run) UpperCAmelCase = '''636036''' UpperCAmelCase = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}" UpperCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json() return result["workflow_runs"] def __snake_case ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase = get_daily_ci_runs(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": UpperCAmelCase = workflow_run['''id'''] break return workflow_run_id def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple ) -> int: """simple docstring""" UpperCAmelCase = get_last_daily_ci_runs(SCREAMING_SNAKE_CASE_ ) if workflow_run_id is not None: UpperCAmelCase = get_artifacts_links(worflow_run_id=SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ ) for artifact_name in artifact_names: if artifact_name in artifacts_links: UpperCAmelCase = artifacts_links[artifact_name] download_artifact( artifact_name=SCREAMING_SNAKE_CASE_ , artifact_url=SCREAMING_SNAKE_CASE_ , output_dir=SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ ) def __snake_case ( SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ) -> List[Any]: """simple docstring""" get_last_daily_ci_artifacts(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) UpperCAmelCase = {} for artifact_name in artifact_names: UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , f"{artifact_name}.zip" ) if os.path.isfile(SCREAMING_SNAKE_CASE_ ): UpperCAmelCase = {} with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): # read the file with z.open(SCREAMING_SNAKE_CASE_ ) as f: UpperCAmelCase = f.read().decode('''UTF-8''' ) return results
51
from torch import nn


def SCREAMING_SNAKE_CASE__(act_fn: str):
    # Map an activation-function name to the corresponding torch.nn module.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
6
0
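A tiny usage sketch for the activation factory fixed above (assumes torch is installed and the function from that record is in scope; the mangled function name is the record's own):

import torch

# "gelu" maps to nn.GELU(); the returned module applies elementwise.
act = SCREAMING_SNAKE_CASE__("gelu")
x = torch.linspace(-2.0, 2.0, steps=5)
print(act(x))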
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = ['''XLNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = ['''XLNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ '''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLNetForMultipleChoice''', '''XLNetForQuestionAnswering''', '''XLNetForQuestionAnsweringSimple''', '''XLNetForSequenceClassification''', '''XLNetForTokenClassification''', '''XLNetLMHeadModel''', '''XLNetModel''', '''XLNetPreTrainedModel''', '''load_tf_weights_in_xlnet''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ '''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLNetForMultipleChoice''', '''TFXLNetForQuestionAnsweringSimple''', '''TFXLNetForSequenceClassification''', '''TFXLNetForTokenClassification''', '''TFXLNetLMHeadModel''', '''TFXLNetMainLayer''', '''TFXLNetModel''', '''TFXLNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
52
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) SCREAMING_SNAKE_CASE__ = MaskFormerConfig(backbone_config=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 847 SCREAMING_SNAKE_CASE__ = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 150 SCREAMING_SNAKE_CASE__ = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 171 SCREAMING_SNAKE_CASE__ = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO SCREAMING_SNAKE_CASE__ = 133 SCREAMING_SNAKE_CASE__ = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 19 SCREAMING_SNAKE_CASE__ = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 65 SCREAMING_SNAKE_CASE__ = """mapillary-vistas-id2label.json""" SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) 
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", 
"""model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) 
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] ): SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = val def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[-dim :] # fmt: on def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): # fmt: off SCREAMING_SNAKE_CASE__ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] 
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # fmt: on def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: bool = False ): SCREAMING_SNAKE_CASE__ = get_maskformer_config(UpperCamelCase__ ) # load original state_dict with open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config ) read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # update to torch tensors for key, value in state_dict.items(): SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase__ ) # load 🤗 model SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(UpperCamelCase__ ) model.eval() for name, param in model.named_parameters(): print(UpperCamelCase__ , param.shape ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(UpperCamelCase__ ) == 0, f'''Unexpected keys: {unexpected_keys}''' # verify results SCREAMING_SNAKE_CASE__ = prepare_img() if "vistas" in model_name: SCREAMING_SNAKE_CASE__ = 65 elif "cityscapes" in model_name: SCREAMING_SNAKE_CASE__ = 65_535 else: SCREAMING_SNAKE_CASE__ = 255 SCREAMING_SNAKE_CASE__ = True if """ade""" in model_name else False SCREAMING_SNAKE_CASE__ = MaskFormerImageProcessor(ignore_index=UpperCamelCase__ , reduce_labels=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = image_processor(UpperCamelCase__ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase__ ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": SCREAMING_SNAKE_CASE__ = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(f'''nielsr/{model_name}''' ) image_processor.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( 
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    # parse_args() was assigned to a throwaway name, leaving `args` undefined below
    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
6
0
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('All input parameters must be positive')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('Relative densities cannot be greater than one')
    # Curvature density is whatever remains after the other components.
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    hubble = hubble_constant * e_2 ** (1 / 2)
    return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
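For reference, the quantity this function computes is the Friedmann expansion rate; in LaTeX notation (my reading of the code above, with the curvature density fixed by the closure relation):

H(z) = H_0 \sqrt{ \Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda }, \qquad \Omega_k = 1 - (\Omega_m + \Omega_r + \Omega_\Lambda)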
53
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { 'nielsr/canine-s': 2048, } # Unicode defines 1,114,112 total “codepoints” _lowerCamelCase = 1114112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _lowerCamelCase = 0 _lowerCamelCase = 0XE0_00 _lowerCamelCase = 0XE0_01 _lowerCamelCase = 0XE0_02 _lowerCamelCase = 0XE0_03 _lowerCamelCase = 0XE0_04 # Maps special codepoints to human-readable names. _lowerCamelCase = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self :str , __A :str=chr(__A ) , __A :str=chr(__A ) , __A :Dict=chr(__A ) , __A :str=chr(__A ) , __A :Union[str, Any]=chr(__A ) , __A :str=chr(__A ) , __A :int=False , __A :int=2048 , **__A :Dict , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , model_max_length=__A , **__A , ) # Creates a mapping for looking up the IDs of special symbols. SCREAMING_SNAKE_CASE__ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): SCREAMING_SNAKE_CASE__ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
SCREAMING_SNAKE_CASE__ = { codepoint: name for name, codepoint in self._special_codepoints.items() } SCREAMING_SNAKE_CASE__ = UNICODE_VOCAB_SIZE SCREAMING_SNAKE_CASE__ = len(self._special_codepoints ) @property def _snake_case ( self :Optional[Any] ) -> int: """simple docstring""" return self._unicode_vocab_size def _snake_case ( self :Tuple , __A :str ) -> List[str]: """simple docstring""" return list(__A ) def _snake_case ( self :Optional[Any] , __A :str ) -> int: """simple docstring""" try: return ord(__A ) except TypeError: raise ValueError(f'''invalid token: \'{token}\'''' ) def _snake_case ( self :str , __A :int ) -> str: """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(__A ) except TypeError: raise ValueError(f'''invalid id: {index}''' ) def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Any: """simple docstring""" return "".join(__A ) def _snake_case ( self :Optional[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) SCREAMING_SNAKE_CASE__ = [1] + ([0] * len(__A )) + [1] if token_ids_a is not None: result += ([0] * len(__A )) + [1] return result def _snake_case ( self :List[str] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def _snake_case ( self :int , __A :str , __A :Optional[str] = None ) -> Any: """simple docstring""" return ()
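Worked example of the codepoint scheme implemented above: for the input "hi", tokenization is list("hi") = ['h', 'i'], the ids are ord('h') = 104 and ord('i') = 105, and the special-token wrapper (cls + token_ids_a + sep in the code) yields [57344, 104, 105, 57345], 0xE000 and 0xE001 being the CLS and SEP codepoints defined at the top of the file.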
6
0
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm, recursive form."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Euclid's algorithm, iterative form."""
    while y:  # --> when y=0 the loop terminates and x is the final GCD
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
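A quick trace of the shared Euclid step, using the iterative form: gcd(48, 18) walks (x, y) = (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) and returns 6; the recursive form reaches the same remainders through its argument pairs.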
54
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) _lowerCamelCase = 'bert-base-cased' _lowerCamelCase = 'fp16' _lowerCamelCase = 'bf16' _lowerCamelCase = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Optional[Any] ) -> Optional[Any]: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def _snake_case ( self :List[Any] ) -> Tuple: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = f'''{i + 1}''' SCREAMING_SNAKE_CASE__ = strategy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def _snake_case ( self :int ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = prefetch_policy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def _snake_case ( self :List[str] ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = state_dict_type with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _snake_case ( self :str ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A ) for policy in FSDP_AUTO_WRAP_POLICY: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = policy if policy == "TRANSFORMER_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """BertLayer""" elif policy == "SIZE_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """2000""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP""" 
SCREAMING_SNAKE_CASE__ = """T5Layer""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() with self.assertRaises(__A ) as cm: fsdp_plugin.set_auto_wrap_policy(__A ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP""" SCREAMING_SNAKE_CASE__ = """0""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _snake_case ( self :Optional[Any] ) -> Optional[int]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = mp_dtype with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = Accelerator() if mp_dtype == "fp16": SCREAMING_SNAKE_CASE__ = torch.floataa elif mp_dtype == "bf16": SCREAMING_SNAKE_CASE__ = torch.bfloataa SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , __A ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__A ) def _snake_case ( self :str ) -> str: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = str(__A ).lower() with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) ) @require_fsdp @require_multi_gpu @slow class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Any ) -> Any: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = 0.8_2 SCREAMING_SNAKE_CASE__ = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] SCREAMING_SNAKE_CASE__ = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def _snake_case ( self :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" ) SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: SCREAMING_SNAKE_CASE__ = cmd.copy() for i, strategy in enumerate(__A ): if strategy.lower() in config: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--performance_lower_bound={self.performance_lower_bound}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = cmd.copy() cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) if strategy != "FULL_SHARD": continue SCREAMING_SNAKE_CASE__ = len(__A ) for state_dict_type in FSDP_STATE_DICT_TYPE: SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index] cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) SCREAMING_SNAKE_CASE__ = cmd_config[:-1] SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ f'''--resume_from_checkpoint={resume_from_checkpoint}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): SCREAMING_SNAKE_CASE__ = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, 
strategy in enumerate(__A ): if strategy.lower() in spec: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--peak_memory_upper_bound={peak_mem_upper_bound}''', f'''--n_train={self.n_train}''', f'''--n_val={self.n_val}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() )
6
0
def is_automorphic_number(number: int) -> bool:
    """Return True if number**2 ends with the digits of number (an automorphic number)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare number and its square digit by digit from the right.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
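A minimal sanity check for the predicate above (a sketch assuming the cleaned-up name is_automorphic_number from the rewrite; the expected output is the list of automorphic numbers below 1000):

print([n for n in range(1000) if is_automorphic_number(n)])
# expected: [0, 1, 5, 6, 25, 76, 376, 625]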
55
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _lowerCamelCase = logging.get_logger(__name__) # General docstring _lowerCamelCase = 'PoolFormerConfig' # Base docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = [1, 512, 7, 7] # Image classification docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = 'tabby, tabby cat' _lowerCamelCase = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ): if drop_prob == 0.0 or not training: return input SCREAMING_SNAKE_CASE__ = 1 - drop_prob SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor return output class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = drop_prob def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor: """simple docstring""" return drop_path(__A , self.drop_prob , self.training ) def _snake_case ( self :Dict ) -> str: """simple docstring""" return "p={}".format(self.drop_prob ) class UpperCamelCase_ ( nn.Module ): def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size) SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride) SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A ) SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity() def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.projection(__A ) SCREAMING_SNAKE_CASE__ = self.norm(__A ) return embeddings class UpperCamelCase_ ( nn.GroupNorm ): def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict: """simple docstring""" super().__init__(1 , __A , **__A ) class UpperCamelCase_ ( nn.Module ): def __init__( self :List[str] , __A :Optional[int] ) -> Any: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A ) def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]: """simple docstring""" return self.pool(__A ) - hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , 
__A :Any ) -> str: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if isinstance(config.hidden_act , __A ): SCREAMING_SNAKE_CASE__ = ACTaFN[config.hidden_act] else: SCREAMING_SNAKE_CASE__ = config.hidden_act def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.act_fn(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) # Useful for training neural nets SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity() SCREAMING_SNAKE_CASE__ = config.use_layer_scale if config.use_layer_scale: SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str: """simple docstring""" if self.use_layer_scale: SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = () SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs else: SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) ) # First residual connection SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states SCREAMING_SNAKE_CASE__ = () # Second residual connection inside the PoolFormerOutput block SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) ) SCREAMING_SNAKE_CASE__ = hidden_states + layer_output SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs class UpperCamelCase_ ( nn.Module ): def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = config # stochastic depth decay rule SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings SCREAMING_SNAKE_CASE__ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) # Transformer blocks SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers 
SCREAMING_SNAKE_CASE__ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__A ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None SCREAMING_SNAKE_CASE__ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers # Get patch embeddings from hidden_states SCREAMING_SNAKE_CASE__ = embedding_layer(__A ) # Send the embeddings through the blocks for _, blk in enumerate(__A ): SCREAMING_SNAKE_CASE__ = blk(__A ) SCREAMING_SNAKE_CASE__ = layer_outputs[0] if output_hidden_states: SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A ) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PoolFormerConfig lowerCamelCase_ = "poolformer" lowerCamelCase_ = "pixel_values" lowerCamelCase_ = True def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict: """simple docstring""" if isinstance(__A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__A , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any: """simple docstring""" if isinstance(__A , __A ): SCREAMING_SNAKE_CASE__ = value _lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
, UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Any ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A ) # Initialize weights and apply final processing self.post_init() def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) SCREAMING_SNAKE_CASE__ = self.encoder( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , ) class UpperCamelCase_ ( nn.Module ): def __init__( self :int , __A :Optional[int] ) -> Tuple: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size ) def _snake_case ( self :List[Any] , __A :Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.dense(__A ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :str , __A :Union[str, Any] ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config.num_labels SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A ) # Final norm SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head SCREAMING_SNAKE_CASE__ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE__ = self.poolformer( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = outputs[0] SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) ) SCREAMING_SNAKE_CASE__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): SCREAMING_SNAKE_CASE__ = 
"""single_label_classification""" else: SCREAMING_SNAKE_CASE__ = """multi_label_classification""" if self.config.problem_type == "regression": SCREAMING_SNAKE_CASE__ = MSELoss() if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) elif self.config.problem_type == "single_label_classification": SCREAMING_SNAKE_CASE__ = CrossEntropyLoss() SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss() SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) if not return_dict: SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
6
0
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _a : str = ( "4S 3H 2C 7S 5H", "9D 8H 2C 6S 7H", "2D 6D 9D TH 7D", "TC 8C 2S JH 6C", "JH 8S TH AH QH", "TS KS 5S 9S AC", "KD 6S 9D TH AD", "KS 8D 4D 9S 4S", # pair "8C 4S KH JS 4D", # pair "QH 8H KD JH 8S", # pair "KC 4H KS 2H 8D", # pair "KD 4S KC 3H 8S", # pair "AH 8S AS KC JH", # pair "3H 4C 4H 3S 2H", # 2 pairs "5S 5D 2C KH KH", # 2 pairs "3C KH 5D 5S KH", # 2 pairs "AS 3C KH AD KH", # 2 pairs "7C 7S 3S 7H 5S", # 3 of a kind "7C 7S KH 2H 7H", # 3 of a kind "AC KH QH AH AS", # 3 of a kind "2H 4D 3C AS 5S", # straight (low ace) "3C 5C 4C 2C 6H", # straight "6S 8S 7S 5H 9H", # straight "JS QS 9H TS KH", # straight "QC KH TS JS AH", # straight (high ace) "8C 9C 5C 3C TC", # flush "3S 8S 9S 5S KS", # flush "4C 5C 9C 8C KC", # flush "JH 8H AH KH QH", # flush "3D 2H 3H 2C 2D", # full house "2H 2C 3S 3H 3D", # full house "KH KC 3S 3H 3D", # full house "JC 6H JS JD JH", # 4 of a kind "JC 7H JS JD JH", # 4 of a kind "JC KH JS JD JH", # 4 of a kind "2S AS 4S 5S 3S", # straight flush (low ace) "2D 6D 3D 4D 5D", # straight flush "5C 6C 3C 7C 4C", # straight flush "JH 9H TH KH QH", # straight flush "JH AH TH KH QH", # royal flush (high ace straight flush) ) _a : str = ( ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"), ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"), ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"), ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"), ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"), ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"), ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"), ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"), ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"), ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"), ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"), ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"), ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"), ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"), ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"), ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"), ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"), ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"), ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"), ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"), ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"), ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"), ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"), ("AH AD KS KC AC", "AH KD KH AC KC", "Win"), ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"), ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"), ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"), ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"), ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"), ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"), ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"), ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"), ) _a : int = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", True), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", False), ("AS 3S 4S 8S 2S", True), ) _a : int = ( ("2H 3H 4H 5H 6H", True), ("AS AH 2H AD AC", False), ("2H 3H 5H 6H 7H", False), ("KS AS TS QS JS", True), ("8H 9H QS JS TH", True), ) _a : Optional[int] = ( ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]), ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]), ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]), ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]), ) _a : Union[str, Any] = ( ("JH AH TH KH QH", 0), ("JH 9H TH KH QH", 0), ("JC KH JS JD JH", 7), ("KH KC 3S 3H 3D", 6), ("8C 9C 5C 3C TC", 0), ("JS QS 9H TS KH", 0), ("7C 7S KH 2H 7H", 3), ("3C KH 5D 5S KH", 
2), ("QH 8H KD JH 8S", 1), ("2D 6D 9D TH 7D", 0), ) _a : Any = ( ("JH AH TH KH QH", 23), ("JH 9H TH KH QH", 22), ("JC KH JS JD JH", 21), ("KH KC 3S 3H 3D", 20), ("8C 9C 5C 3C TC", 19), ("JS QS 9H TS KH", 18), ("7C 7S KH 2H 7H", 17), ("3C KH 5D 5S KH", 16), ("QH 8H KD JH 8S", 15), ("2D 6D 9D TH 7D", 14), ) def _a () -> str: """simple docstring""" __snake_case , __snake_case = randrange(len(lowercase__ ) ), randrange(len(lowercase__ ) ) __snake_case = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] __snake_case , __snake_case = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _a (lowercase__ : int = 1_0_0 ) -> Optional[int]: """simple docstring""" return (generate_random_hand() for _ in range(lowercase__ )) @pytest.mark.parametrize('hand, expected' , lowercase__ ) def _a (lowercase__ : Optional[int] , lowercase__ : Any ) -> Dict: """simple docstring""" assert PokerHand(lowercase__ )._is_flush() == expected @pytest.mark.parametrize('hand, expected' , lowercase__ ) def _a (lowercase__ : int , lowercase__ : List[Any] ) -> List[Any]: """simple docstring""" assert PokerHand(lowercase__ )._is_straight() == expected @pytest.mark.parametrize('hand, expected, card_values' , lowercase__ ) def _a (lowercase__ : int , lowercase__ : Tuple , lowercase__ : List[str] ) -> str: """simple docstring""" __snake_case = PokerHand(lowercase__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('hand, expected' , lowercase__ ) def _a (lowercase__ : str , lowercase__ : int ) -> Any: """simple docstring""" assert PokerHand(lowercase__ )._is_same_kind() == expected @pytest.mark.parametrize('hand, expected' , lowercase__ ) def _a (lowercase__ : List[str] , lowercase__ : str ) -> Union[str, Any]: """simple docstring""" assert PokerHand(lowercase__ )._hand_type == expected @pytest.mark.parametrize('hand, other, expected' , lowercase__ ) def _a (lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : Union[str, Any] ) -> str: """simple docstring""" assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected @pytest.mark.parametrize('hand, other, expected' , generate_random_hands() ) def _a (lowercase__ : int , lowercase__ : int , lowercase__ : List[str] ) -> str: """simple docstring""" assert PokerHand(lowercase__ ).compare_with(PokerHand(lowercase__ ) ) == expected def _a () -> Any: """simple docstring""" __snake_case = [PokerHand(lowercase__ ) for hand in SORTED_HANDS] __snake_case = poker_hands.copy() shuffle(lowercase__ ) __snake_case = chain(sorted(lowercase__ ) ) for index, hand in enumerate(lowercase__ ): assert hand == poker_hands[index] def _a () -> str: """simple docstring""" # Test that five high straights are compared correctly. __snake_case = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )] pokerhands.sort(reverse=lowercase__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _a () -> Dict: """simple docstring""" # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. 
    pokerhand = PokerHand('2C 4S AS 3D 5C')
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project() -> None:
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))  # was an undefined name
    file_path = os.path.join(script_dir, 'poker_hands.txt')
    with open(file_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
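A minimal usage sketch based only on the PokerHand API exercised by these tests (a hand is constructed from a five-card string and compared with compare_with); the two hands and the expected result come from the first row of the expected-results table above.

hand = PokerHand("2H 3H 4H 5H 6H")    # straight flush
other = PokerHand("KS AS TS QS JS")   # royal flush
print(hand.compare_with(other))       # "Loss", per the test data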
56
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_lengths SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = gelu_activation SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings SCREAMING_SNAKE_CASE__ = causal SCREAMING_SNAKE_CASE__ = asm SCREAMING_SNAKE_CASE__ = n_langs SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = n_special SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = summary_type SCREAMING_SNAKE_CASE__ = use_proj SCREAMING_SNAKE_CASE__ = scope def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_input_lengths: SCREAMING_SNAKE_CASE__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float() SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) 
SCREAMING_SNAKE_CASE__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self :List[str] ) -> Optional[int]: """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, 
model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_choices SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self :Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): lowerCamelCase_ = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase_ = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def 
_snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 ) def _snake_case ( self :int ) -> int: """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__A ) def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__A ) def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__A ) def _snake_case ( self :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__A ) def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__A ) def _snake_case ( self :Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__A ) def _snake_case ( self :Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__A ) @slow def _snake_case ( self :Union[str, Any] ) -> List[str]: """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @slow @require_torch_gpu def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model_class(config=__A ) SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A ) SCREAMING_SNAKE_CASE__ = torch.jit.trace( __A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) ) SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A ) loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _snake_case ( self :Dict ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(__A )[0] SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __A ) SCREAMING_SNAKE_CASE__ = torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
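# Hedged usage sketch (added for illustration; the checkpoint name and input
# ids are the ones used by the slow integration test above):
#
#   import torch
#   from transformers import FlaubertModel
#
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
#   with torch.no_grad():
#       last_hidden_state = model(input_ids)[0]   # shape (1, 11, 768)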
import argparse
import os
import pickle
import sys

import torch

from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
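# Hedged invocation sketch (paths below are placeholders, not from the
# original script); the converter is driven from the command line:
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/transfo_xl_checkpoint \
#       --transfo_xl_config_file /path/to/config.json \
#       --pytorch_dump_folder_path /path/to/output_dir
#
# Passing --transfo_xl_dataset_file instead (or in addition) converts a
# pickled corpus into the vocabulary and dataset caches, relying on the
# python-2 pickle module aliasing set up at the top of the file.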
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple=True ): model.train() SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[Any]=False ): set_seed(42 ) SCREAMING_SNAKE_CASE__ = RegressionModel() SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) model.to(accelerator.device ) if sched: SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = AdamW(params=ddp_model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) # Make a copy of `model` if sched: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): # Test when on a single CPU or GPU that the context manager does nothing SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ): # Test on distributed setup that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ )) if accelerator.num_processes > 1: check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = 
RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if iteration < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if batch_num < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(UpperCamelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(UpperCamelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
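# Hedged mini-sketch (not part of the test file): the pattern the `no_sync`
# tests above exercise. Gradients are only all-reduced across processes on
# iterations where the `no_sync` context is NOT entered; the names below
# assume the same setup as `get_training_setup`.
#
#   for step, batch in enumerate(dataloader):
#       if step % 2 == 0:
#           with accelerator.no_sync(ddp_model):       # accumulate locally
#               step_model(ddp_model, input, target, accelerator)
#       else:
#           step_model(ddp_model, input, target, accelerator)  # grads sync here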
"""simple docstring""" import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): """simple docstring""" @register_to_config def __init__( self , _lowercase = 1_2_8 , _lowercase = 2_5_6 , _lowercase = 2000.0 , _lowercase = 7_6_8 , _lowercase = 1_2 , _lowercase = 1_2 , _lowercase = 6_4 , _lowercase = 2_0_4_8 , _lowercase = 0.1 , ) -> Dict: '''simple docstring''' super().__init__() snake_case_ : Optional[Any] = nn.Sequential( nn.Linear(_lowercase , d_model * 4 , bias=_lowercase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowercase ) , nn.SiLU() , ) snake_case_ : Any = nn.Embedding(_lowercase , _lowercase ) snake_case_ : Union[str, Any] = False snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) snake_case_ : Union[str, Any] = nn.Dropout(p=_lowercase ) snake_case_ : Tuple = nn.ModuleList() for lyr_num in range(_lowercase ): # FiLM conditional T5 decoder snake_case_ : Union[str, Any] = DecoderLayer(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase ) self.decoders.append(_lowercase ) snake_case_ : List[Any] = TaLayerNorm(_lowercase ) snake_case_ : Optional[Any] = nn.Dropout(p=_lowercase ) snake_case_ : List[Any] = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[int] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> List[Any]: '''simple docstring''' snake_case_ , snake_case_ , snake_case_ : str = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. snake_case_ : Optional[int] = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) snake_case_ : int = self.conditioning_emb(_lowercase ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) snake_case_ : Tuple = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. snake_case_ : Dict = torch.broadcast_to( torch.arange(_lowercase , device=decoder_input_tokens.device ) , (batch, seq_length) , ) snake_case_ : Tuple = self.position_encoding(_lowercase ) snake_case_ : Optional[Any] = self.continuous_inputs_projection(_lowercase ) inputs += position_encodings snake_case_ : List[Any] = self.dropout(_lowercase ) # decoder: No padding present. snake_case_ : Tuple = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
snake_case_ : int = [(x, self.encoder_decoder_mask(_lowercase , _lowercase )) for x, y in encodings_and_masks] # cross attend style: concat encodings snake_case_ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) snake_case_ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: snake_case_ : int = lyr( _lowercase , conditioning_emb=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )[0] snake_case_ : int = self.decoder_norm(_lowercase ) snake_case_ : Union[str, Any] = self.post_dropout(_lowercase ) snake_case_ : int = self.spec_out(_lowercase ) return spec_out class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=1E-6 ) -> Union[str, Any]: '''simple docstring''' super().__init__() snake_case_ : Any = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=_lowercase , d_kv=_lowercase , num_heads=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase , layer_norm_epsilon=_lowercase ) ) def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , ) -> List[Any]: '''simple docstring''' snake_case_ : Tuple = self.layer[0]( _lowercase , conditioning_emb=_lowercase , attention_mask=_lowercase , ) if encoder_hidden_states is not None: snake_case_ : Tuple = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) snake_case_ : str = self.layer[1]( _lowercase , key_value_states=_lowercase , attention_mask=_lowercase , ) # Apply Film Conditional Feed Forward layer snake_case_ : Any = self.layer[-1](_lowercase , _lowercase ) return (hidden_states,) class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> str: '''simple docstring''' super().__init__() snake_case_ : Any = TaLayerNorm(_lowercase ) snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase ) snake_case_ : Union[str, Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase ) snake_case_ : List[Any] = nn.Dropout(_lowercase ) def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[Any]: '''simple docstring''' snake_case_ : Dict = self.layer_norm(_lowercase ) if conditioning_emb is not None: snake_case_ : str = self.FiLMLayer(_lowercase , _lowercase ) # Self-attention block snake_case_ : List[Any] = self.attention(_lowercase ) snake_case_ : List[str] = hidden_states + self.dropout(_lowercase ) return hidden_states class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]: '''simple docstring''' super().__init__() snake_case_ : List[Any] = Attention(query_dim=_lowercase , heads=_lowercase , dim_head=_lowercase , out_bias=_lowercase , scale_qk=_lowercase ) snake_case_ : Union[str, Any] = TaLayerNorm(_lowercase , eps=_lowercase ) snake_case_ : Optional[Any] = nn.Dropout(_lowercase ) 
def UpperCAmelCase__ ( self , _lowercase , _lowercase=None , _lowercase=None , ) -> Optional[int]: '''simple docstring''' snake_case_ : List[Any] = self.layer_norm(_lowercase ) snake_case_ : Optional[Any] = self.attention( _lowercase , encoder_hidden_states=_lowercase , attention_mask=attention_mask.squeeze(1 ) , ) snake_case_ : Any = hidden_states + self.dropout(_lowercase ) return layer_output class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict: '''simple docstring''' super().__init__() snake_case_ : Tuple = TaDenseGatedActDense(d_model=_lowercase , d_ff=_lowercase , dropout_rate=_lowercase ) snake_case_ : List[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowercase ) snake_case_ : Optional[int] = TaLayerNorm(_lowercase , eps=_lowercase ) snake_case_ : Tuple = nn.Dropout(_lowercase ) def UpperCAmelCase__ ( self , _lowercase , _lowercase=None ) -> str: '''simple docstring''' snake_case_ : List[Any] = self.layer_norm(_lowercase ) if conditioning_emb is not None: snake_case_ : Optional[int] = self.film(_lowercase , _lowercase ) snake_case_ : int = self.DenseReluDense(_lowercase ) snake_case_ : Optional[Any] = hidden_states + self.dropout(_lowercase ) return hidden_states class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]: '''simple docstring''' super().__init__() snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) snake_case_ : Optional[int] = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) snake_case_ : Any = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) snake_case_ : int = nn.Dropout(_lowercase ) snake_case_ : Optional[int] = NewGELUActivation() def UpperCAmelCase__ ( self , _lowercase ) -> int: '''simple docstring''' snake_case_ : str = self.act(self.wi_a(_lowercase ) ) snake_case_ : Dict = self.wi_a(_lowercase ) snake_case_ : Any = hidden_gelu * hidden_linear snake_case_ : List[Any] = self.dropout(_lowercase ) snake_case_ : Tuple = self.wo(_lowercase ) return hidden_states class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , _lowercase , _lowercase=1E-6 ) -> str: '''simple docstring''' super().__init__() snake_case_ : Union[str, Any] = nn.Parameter(torch.ones(_lowercase ) ) snake_case_ : int = eps def UpperCAmelCase__ ( self , _lowercase ) -> List[Any]: '''simple docstring''' snake_case_ : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_lowercase ) snake_case_ : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: snake_case_ : str = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class _lowerCAmelCase ( nn.Module ): """simple docstring""" def UpperCAmelCase__ ( self , _lowercase ) -> torch.Tensor: '''simple docstring''' return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(_lowercase , 3.0 )) )) class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self , _lowercase , _lowercase ) -> Any: '''simple docstring''' super().__init__() snake_case_ : List[Any] = nn.Linear(_lowercase , out_features * 2 , bias=_lowercase ) def UpperCAmelCase__ ( self , _lowercase , _lowercase ) -> List[Any]: '''simple docstring''' snake_case_ : List[Any] = self.scale_bias(_lowercase ) snake_case_ , snake_case_ : Any = 
torch.chunk(_lowercase , 2 , -1 ) snake_case_ : Optional[Any] = x * (1 + scale) + shift return x
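# Hedged numerical example (illustration only, not part of the model file):
# the FiLM layer above splits its projection into (scale, shift) with
# torch.chunk and modulates activations as x * (1 + scale) + shift.
import torch

x = torch.ones(1, 4)                      # stand-in activations
scale_shift = torch.tensor([[0.5, 0.5, 0.5, 0.5, 0.1, 0.1, 0.1, 0.1]])
scale, shift = torch.chunk(scale_shift, 2, dim=-1)
modulated = x * (1 + scale) + shift       # tensor([[1.6, 1.6, 1.6, 1.6]])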
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ImageTextProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
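# Hedged usage sketch (the checkpoint name is an assumption; any checkpoint
# shipping both an image processor and a tokenizer follows the same pattern):
#
#   from PIL import Image
#   from transformers import AutoProcessor
#
#   processor = AutoProcessor.from_pretrained("microsoft/git-base")
#   inputs = processor(text="a photo of a cat", images=Image.open("cat.png"),
#                      return_tensors="pt")
#   sorted(inputs.keys())   # ['attention_mask', 'input_ids', 'pixel_values']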
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid, applied element-wise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU-style GELU approximation: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
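# Illustrative values for the two activations above (rounded to 4 d.p.):
#
#   sigmoid(np.array([-1.0, 1.0, 2.0]))              # [0.2689, 0.7311, 0.8808]
#   sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))  # [-0.1542, 0.8458, 1.9357]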
def find_min(arr: list[int]) -> int:
    """Return the minimum difference between the sums of the two subsets of a partition of arr."""
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True if some subset of the first i elements sums to j
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]

    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0

    for j in range(1, s + 1):
        dp[0][j] = False  # no positive sum is reachable with zero elements

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take arr[i - 1]

    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break

    return diff
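# Worked example for find_min above (array chosen for illustration): with
# arr = [1, 6, 11, 5] the total is 23, and the reachable subset sum closest
# to 23 / 2 from below is 11 (the single element 11), so the minimum
# partition difference is 23 - 2 * 11 = 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # 1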
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa lowerCAmelCase_ = logging.getLogger(__name__) class __lowerCAmelCase ( _a ): lowerCamelCase_ : int = '''summarization''' lowerCamelCase_ : int = ['''loss'''] lowerCamelCase_ : List[str] = ROUGE_KEYS lowerCamelCase_ : Tuple = '''rouge2''' def __init__(self , __magic_name__ , **__magic_name__ ) -> Dict: '''simple docstring''' if hparams.sortish_sampler and hparams.gpus > 1: snake_case_ : List[Any] = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' ) if hparams.sortish_sampler: raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' ) super().__init__(__magic_name__ , num_labels=__magic_name__ , mode=self.mode , **__magic_name__ ) use_task_specific_params(self.model , '''summarization''' ) save_git_info(self.hparams.output_dir ) snake_case_ : Union[str, Any] = Path(self.output_dir ) / '''metrics.json''' snake_case_ : Union[str, Any] = Path(self.output_dir ) / '''hparams.pkl''' pickle_save(self.hparams , self.hparams_save_path ) snake_case_ : Any = 0 snake_case_ : Any = defaultdict(__magic_name__ ) snake_case_ : List[Any] = self.config.model_type snake_case_ : str = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size snake_case_ : dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } snake_case_ : List[Any] = { '''train''': self.hparams.n_train, '''val''': self.hparams.n_val, '''test''': self.hparams.n_test, } snake_case_ : str = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} snake_case_ : Dict = { '''train''': self.hparams.max_target_length, '''val''': self.hparams.val_max_target_length, '''test''': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) snake_case_ : Tuple = get_git_info()['''repo_sha'''] snake_case_ : str = hparams.num_workers snake_case_ : List[Any] = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __magic_name__ ): snake_case_ : str = 
self.tokenizer.lang_code_to_id[hparams.tgt_lang] snake_case_ : Any = self.decoder_start_token_id snake_case_ : Dict = ( SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset ) snake_case_ : str = False snake_case_ : List[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: snake_case_ : Any = self.hparams.eval_max_gen_length else: snake_case_ : Optional[Any] = self.model.config.max_length snake_case_ : Union[str, Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def lowerCamelCase (self , __magic_name__ ) -> Dict[str, List[str]]: '''simple docstring''' snake_case_ : Tuple = { k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items() } save_json(__magic_name__ , Path(self.output_dir ) / '''text_batch.json''' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' ) snake_case_ : int = True return readable_batch def lowerCamelCase (self , __magic_name__ , **__magic_name__ ) -> Tuple: '''simple docstring''' return self.model(__magic_name__ , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Any: '''simple docstring''' snake_case_ : Union[str, Any] = self.tokenizer.batch_decode( __magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) return lmap(str.strip , __magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Tuple: '''simple docstring''' snake_case_ : int = self.tokenizer.pad_token_id snake_case_ , snake_case_ : Tuple = batch['''input_ids'''], batch['''attention_mask'''] snake_case_ : Dict = batch['''labels'''] if isinstance(self.model , __magic_name__ ): snake_case_ : Tuple = self.model._shift_right(__magic_name__ ) else: snake_case_ : Any = shift_tokens_right(__magic_name__ , __magic_name__ ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero snake_case_ : Union[str, Any] = decoder_input_ids self.save_readable_batch(__magic_name__ ) snake_case_ : List[Any] = self(__magic_name__ , attention_mask=__magic_name__ , decoder_input_ids=__magic_name__ , use_cache=__magic_name__ ) snake_case_ : List[Any] = outputs['''logits'''] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id snake_case_ : Dict = nn.CrossEntropyLoss(ignore_index=__magic_name__ ) assert lm_logits.shape[-1] == self.vocab_size snake_case_ : List[str] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: snake_case_ : Union[str, Any] = nn.functional.log_softmax(__magic_name__ , dim=-1 ) snake_case_ , snake_case_ : List[Any] = label_smoothed_nll_loss( __magic_name__ , __magic_name__ , self.hparams.label_smoothing , ignore_index=__magic_name__ ) return (loss,) @property def lowerCamelCase (self ) -> int: '''simple docstring''' return self.tokenizer.pad_token_id def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Any = self._step(__magic_name__ ) snake_case_ : Union[str, Any] = dict(zip(self.loss_names , __magic_name__ ) ) # tokens per batch snake_case_ : Union[str, Any] = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum() snake_case_ : Union[str, Any] = batch['''input_ids'''].shape[0] snake_case_ : Any = batch['''input_ids'''].eq(self.pad ).sum() snake_case_ : Optional[int] = 
batch['''input_ids'''].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' return self._generative_step(__magic_name__ ) def lowerCamelCase (self , __magic_name__ , __magic_name__="val" ) -> Dict: '''simple docstring''' self.step_count += 1 snake_case_ : str = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} snake_case_ : Dict = losses['''loss'''] snake_case_ : int = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len'''] } snake_case_ : Tuple = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) snake_case_ : torch.FloatTensor = torch.tensor(__magic_name__ ).type_as(__magic_name__ ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(__magic_name__ ) snake_case_ : int = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()} snake_case_ : Union[str, Any] = self.step_count self.metrics[prefix].append(__magic_name__ ) # callback writes this to self.metrics_save_path snake_case_ : Union[str, Any] = flatten_list([x['''preds'''] for x in outputs] ) return { "log": all_metrics, "preds": preds, F'''{prefix}_loss''': loss, F'''{prefix}_{self.val_metric}''': metric_tensor, } def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' return calculate_rouge(__magic_name__ , __magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> dict: '''simple docstring''' snake_case_ : int = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') snake_case_ : List[Any] = self.model.generate( batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=__magic_name__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) snake_case_ : str = (time.time() - ta) / batch['''input_ids'''].shape[0] snake_case_ : List[str] = self.ids_to_clean_text(__magic_name__ ) snake_case_ : List[str] = self.ids_to_clean_text(batch['''labels'''] ) snake_case_ : Any = self._step(__magic_name__ ) snake_case_ : List[str] = dict(zip(self.loss_names , __magic_name__ ) ) snake_case_ : Dict = self.calc_generative_metrics(__magic_name__ , __magic_name__ ) snake_case_ : Any = np.mean(lmap(__magic_name__ , __magic_name__ ) ) base_metrics.update(gen_time=__magic_name__ , gen_len=__magic_name__ , preds=__magic_name__ , target=__magic_name__ , **__magic_name__ ) return base_metrics def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' return self._generative_step(__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Optional[Any]: '''simple docstring''' return self.validation_epoch_end(__magic_name__ , prefix='''test''' ) def lowerCamelCase (self , __magic_name__ ) -> SeqaSeqDataset: '''simple docstring''' snake_case_ : Dict = self.n_obs[type_path] snake_case_ : Tuple = self.target_lens[type_path] snake_case_ : Tuple = self.dataset_class( self.tokenizer , type_path=__magic_name__ , n_obs=__magic_name__ , max_target_length=__magic_name__ , **self.dataset_kwargs , ) return dataset def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ = False ) -> DataLoader: '''simple docstring''' snake_case_ : Tuple = self.get_dataset(__magic_name__ ) if 
self.hparams.sortish_sampler and type_path != "test" and type_path != "val": snake_case_ : Union[str, Any] = dataset.make_sortish_sampler(__magic_name__ , distributed=self.hparams.gpus > 1 ) return DataLoader( __magic_name__ , batch_size=__magic_name__ , collate_fn=dataset.collate_fn , shuffle=__magic_name__ , num_workers=self.num_workers , sampler=__magic_name__ , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": snake_case_ : Tuple = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( __magic_name__ , batch_sampler=__magic_name__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( __magic_name__ , batch_size=__magic_name__ , collate_fn=dataset.collate_fn , shuffle=__magic_name__ , num_workers=self.num_workers , sampler=__magic_name__ , ) def lowerCamelCase (self ) -> DataLoader: '''simple docstring''' snake_case_ : List[str] = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=__magic_name__ ) return dataloader def lowerCamelCase (self ) -> DataLoader: '''simple docstring''' return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size ) def lowerCamelCase (self ) -> DataLoader: '''simple docstring''' return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size ) @staticmethod def lowerCamelCase (__magic_name__ , __magic_name__ ) -> Optional[Any]: '''simple docstring''' BaseTransformer.add_model_specific_args(__magic_name__ , __magic_name__ ) add_generic_args(__magic_name__ , __magic_name__ ) parser.add_argument( '''--max_source_length''' , default=1024 , type=__magic_name__ , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--max_target_length''' , default=56 , type=__magic_name__ , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--val_max_target_length''' , default=142 , type=__magic_name__ , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--test_max_target_length''' , default=142 , type=__magic_name__ , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument('''--freeze_encoder''' , action='''store_true''' ) parser.add_argument('''--freeze_embeds''' , action='''store_true''' ) parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=__magic_name__ ) parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=__magic_name__ ) parser.add_argument('''--max_tokens_per_batch''' , type=__magic_name__ , default=__magic_name__ ) parser.add_argument('''--logger_name''' , type=__magic_name__ , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' ) parser.add_argument('''--n_train''' , type=__magic_name__ , default=-1 , required=__magic_name__ , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_val''' , type=__magic_name__ , default=500 , required=__magic_name__ , help='''# examples. 
-1 means use all.''' ) parser.add_argument('''--n_test''' , type=__magic_name__ , default=-1 , required=__magic_name__ , help='''# examples. -1 means use all.''' ) parser.add_argument( '''--task''' , type=__magic_name__ , default='''summarization''' , required=__magic_name__ , help='''# examples. -1 means use all.''' ) parser.add_argument('''--label_smoothing''' , type=__magic_name__ , default=0.0 , required=__magic_name__ ) parser.add_argument('''--src_lang''' , type=__magic_name__ , default='''''' , required=__magic_name__ ) parser.add_argument('''--tgt_lang''' , type=__magic_name__ , default='''''' , required=__magic_name__ ) parser.add_argument('''--eval_beams''' , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ ) parser.add_argument( '''--val_metric''' , type=__magic_name__ , default=__magic_name__ , required=__magic_name__ , choices=['''bleu''', '''rouge2''', '''loss''', None] ) parser.add_argument('''--eval_max_gen_length''' , type=__magic_name__ , default=__magic_name__ , help='''never generate more than n tokens''' ) parser.add_argument('''--save_top_k''' , type=__magic_name__ , default=1 , required=__magic_name__ , help='''How many checkpoints to save''' ) parser.add_argument( '''--early_stopping_patience''' , type=__magic_name__ , default=-1 , required=__magic_name__ , help=( '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So''' ''' val_check_interval will effect it.''' ) , ) return parser class __lowerCAmelCase ( _a ): lowerCamelCase_ : Dict = '''translation''' lowerCamelCase_ : List[str] = ['''loss'''] lowerCamelCase_ : Any = ['''bleu'''] lowerCamelCase_ : Dict = '''bleu''' def __init__(self , __magic_name__ , **__magic_name__ ) -> List[str]: '''simple docstring''' super().__init__(__magic_name__ , **__magic_name__ ) snake_case_ : Union[str, Any] = hparams.src_lang snake_case_ : Optional[int] = hparams.tgt_lang def lowerCamelCase (self , __magic_name__ , __magic_name__ ) -> dict: '''simple docstring''' return calculate_bleu(__magic_name__ , __magic_name__ ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=None ) -> SummarizationModule: """simple docstring""" Path(args.output_dir ).mkdir(exist_ok=_UpperCamelCase ) check_output_dir(_UpperCamelCase , expected_items=3 ) if model is None: if "summarization" in args.task: snake_case_ : SummarizationModule = SummarizationModule(_UpperCamelCase ) else: snake_case_ : SummarizationModule = TranslationModule(_UpperCamelCase ) snake_case_ : int = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('''/tmp''' ) or str(args.output_dir ).startswith('''/var''' ) ): snake_case_ : List[str] = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger snake_case_ : Dict = os.environ.get('''WANDB_PROJECT''' , _UpperCamelCase ) snake_case_ : str = WandbLogger(name=model.output_dir.name , project=_UpperCamelCase ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger snake_case_ : str = WandbLogger(name=model.output_dir.name , project=f'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: snake_case_ : List[str] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: snake_case_ : Tuple = False snake_case_ : List[str] = args.val_metric == '''loss''' snake_case_ : pl.Trainer = generic_train( _UpperCamelCase , _UpperCamelCase , 
logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , _UpperCamelCase ) , early_stopping_callback=_UpperCamelCase , logger=_UpperCamelCase , ) pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' ) if not args.do_predict: return model snake_case_ : List[Any] = '''''' snake_case_ : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=_UpperCamelCase ) ) if checkpoints: snake_case_ : int = checkpoints[-1] snake_case_ : Tuple = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() lowerCAmelCase_ = pl.Trainer.add_argparse_args(parser) lowerCAmelCase_ = SummarizationModule.add_model_specific_args(parser, os.getcwd()) lowerCAmelCase_ = parser.parse_args() main(args)
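# Hedged invocation sketch (paths and hyper-parameters are placeholders, not
# taken from the original example):
#
#   python finetune.py \
#       --model_name_or_path t5-small \
#       --data_dir /path/to/cnn_dm \
#       --output_dir /tmp/seq2seq_ckpts \
#       --max_source_length 1024 --max_target_length 56 \
#       --gpus 1 --do_predict --logger_name default
#
# The data_dir is typically expected to contain train/val/test source and
# target files as consumed by SeqaSeqDataset above.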
def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy of a body: KE = 0.5 * m * v**2 (joules for SI inputs)."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
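# Worked example (illustrative numbers): a 10 kg mass moving at 5 m/s carries
# 0.5 * 10 * 5**2 = 125 J, so kinetic_energy(10, 5) returns 125.0.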
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: Harris free parameter (empirically 0.04 or 0.06)
        window_size: side length of the neighbourhood window
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # Harris free parameter from the constructor
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
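# Hedged usage sketch (model names are illustrative): composing a config from
# two pretrained configs via the classmethod above.
#
#   from transformers import AutoConfig, EncoderDecoderConfig
#
#   enc = AutoConfig.from_pretrained("bert-base-uncased")
#   dec = AutoConfig.from_pretrained("bert-base-uncased")
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
#   (config.decoder.is_decoder, config.decoder.add_cross_attention)  # (True, True)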
6
0
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
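# For a root of multiplicity m, plain Newton iteration converges only
# linearly; the scaled step x_{n+1} = x_n - m * f(x_n) / f'(x_n) used above
# restores quadratic convergence. Illustrative call (not from the source):
# newton_raphson("(x - 1)**2", 3, multiplicity=2) converges to the double
# root at x = 1 far faster than the default multiplicity=1 would.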
62
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
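# Usage sketch (feature and label names are illustrative): aligning the
# template with a dataset's real ClassLabel feature copies its label names
# into the task's label schema.
# feats = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# task = TextClassification().align_with_features(feats)
# task.label_schema["labels"].names  # -> ["neg", "pos"]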
6
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
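# Usage sketch (hypothetical snippet, not from the source): the defaults
# mirror a ViT-Base layout, so a 224x224 image with 16x16 patches yields
# (224 // 16) ** 2 == 196 patch tokens.
# config = ViTMSNConfig()
# num_patches = (config.image_size // config.patch_size) ** 2  # 196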
63
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config


def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
6
0
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
64
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
6
0
"""simple docstring""" import random def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = [], [], [] for element in data: if element < pivot: less.append(__UpperCamelCase ) elif element > pivot: greater.append(__UpperCamelCase ) else: equal.append(__UpperCamelCase ) return less, equal, greater def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' if index >= len(__UpperCamelCase ) or index < 0: return None UpperCAmelCase__ : List[Any] = items[random.randint(0 , len(__UpperCamelCase ) - 1 )] UpperCAmelCase__ : Optional[Any] = 0 UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = _partition(__UpperCamelCase , __UpperCamelCase ) UpperCAmelCase__ : Tuple = len(__UpperCamelCase ) UpperCAmelCase__ : Optional[Any] = len(__UpperCamelCase ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(__UpperCamelCase , __UpperCamelCase ) # must be in larger else: return quick_select(__UpperCamelCase , index - (m + count) )
65
def solution(n: int = 600_851_475_143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)


if __name__ == "__main__":
    print(f"{solution() = }")
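# Worked example (not from the source): 13195 = 5 * 7 * 13 * 29, so
# solution(13195) returns 29, its largest prime factor. Trial division only
# runs while i * i <= n, so the loop costs O(sqrt(n)).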
6
0
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # Return the (row, column) pair that represents the letter in the square.
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        # Return the letter at position (index1, index2) of the square.
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
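# Round-trip sketch (message is illustrative): the cipher fractionates each
# letter into (row, column) coordinates on the 5x5 square, transposes the
# coordinate stream, and reads pairs back out, so decode inverts encode:
# cipher = BifidCipher()
# cipher.decode(cipher.encode("testmessage")) == "testmessage"
# (note that "j" is folded into "i" before encoding, as the square has no "j")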
66
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
6
0
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))

    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
67
import argparse
import datetime


def zeller(date_input: str) -> str:
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
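# Worked example (date is illustrative): for "01-31-2010" the congruence
# yields f = 0, i.e. a Sunday, which the built-in datetime cross-check
# confirms:
# zeller("01-31-2010")  # -> "Your date 01-31-2010, is a Sunday!"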
6
0
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
   author    = {Rei, Ricardo  and  Stewart, Craig  and  Farinha, Ana C  and  Lavie, Alon},
   title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},
   booktitle = {Proceedings of the Fifth Conference on Machine Translation},
   month     = {November},
   year      = {2020},
   address   = {Online},
   publisher = {Association for Computational Linguistics},
   pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo  and
      Stewart, Craig  and
      Farinha, Ana C  and
      Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
   `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
   `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da')  # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
68
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
6
0
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    # Hash map with open addressing.

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        # Try to add the value to the bucket; on collision with a different
        # key, signal the caller to probe the next bucket.
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
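# Usage sketch (keys are illustrative): the map behaves like a dict and
# resizes itself as the load factor crosses capacity_factor; deleted slots
# are tombstoned with the falsy _deleted sentinel so probe chains survive.
# hm = HashMap(initial_block_size=8, capacity_factor=0.75)
# hm["a"] = 1
# hm["b"] = 2
# del hm["a"]
# assert "a" not in hm and hm["b"] == 2 and len(hm) == 1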
69
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
6
0
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
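# Illustrative examples (not from the source):
# snake_to_camel_case("some_random_string")        # -> "someRandomString"
# snake_to_camel_case("some_random_string", True)  # -> "SomeRandomString"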
70
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
6
0
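The processor above pads nested text queries so that every image in a batch carries the same number of query strings before tokenization. A minimal sketch of that padding step in plain Python; the query strings are illustrative placeholders, not fixtures from this repository:

# Sketch of the per-batch query padding performed in OwlViTProcessor.__call__.
text = [["a photo of a cat"], ["a photo of a dog", "a photo of a remote"]]

max_num_queries = max(len(t) for t in text)                 # 2 in this example
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]

assert all(len(t) == max_num_queries for t in padded)
print(padded)  # [['a photo of a cat', ' '], ['a photo of a dog', 'a photo of a remote']]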
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
71
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, holding the (not yet quantized) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
6
0
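The `quantize` module used by the model above performs a nearest-neighbour lookup into a learned codebook. A self-contained PyTorch sketch of that lookup; the shapes and the random codebook are arbitrary stand-ins, not the actual `VectorQuantizer` implementation:

import torch

# Toy codebook lookup: for each input vector, pick the closest codebook entry.
num_embeddings, embed_dim = 256, 3
codebook = torch.randn(num_embeddings, embed_dim)   # stand-in for learned VQ weights
z = torch.randn(8, embed_dim)                       # flattened latents, e.g. (B*H*W, C)

distances = torch.cdist(z, codebook) ** 2           # squared L2 distance, shape (8, 256)
indices = distances.argmin(dim=1)                   # nearest code per latent
z_q = codebook[indices]                             # quantized latents, same shape as z

print(indices.shape, z_q.shape)  # torch.Size([8]) torch.Size([8, 3])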
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class __magic_name__ : def __init__( self , snake_case_ , snake_case_=2 , snake_case_=True , snake_case_=False , snake_case_=10 , snake_case_=3 , snake_case_=32 * 4 , snake_case_=32 * 6 , snake_case_=4 , snake_case_=32 , ): lowercase =parent lowercase =batch_size lowercase =is_training lowercase =use_auxiliary_loss lowercase =num_queries lowercase =num_channels lowercase =min_size lowercase =max_size lowercase =num_labels lowercase =mask_feature_size def _A( self ): lowercase =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( snake_case_ ) lowercase =torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ ) lowercase =( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5 ).float() lowercase =(torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long() lowercase =self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _A( self ): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def _A( self ): lowercase , lowercase , lowercase , lowercase , lowercase =self.prepare_config_and_inputs() lowercase ={'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def _A( self , snake_case_ , snake_case_ ): lowercase =output.encoder_hidden_states lowercase =output.pixel_decoder_hidden_states lowercase =output.transformer_decoder_hidden_states self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers ) def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_=False ): with torch.no_grad(): lowercase =MaskFormerModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() lowercase =model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) lowercase =model(snake_case_ , output_hidden_states=snake_case_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not 
None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(snake_case_ , snake_case_ ) def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): lowercase =MaskFormerForInstanceSegmentation(config=snake_case_ ) model.to(snake_case_ ) model.eval() def comm_check_on_output(snake_case_ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowercase =model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) lowercase =model(snake_case_ ) comm_check_on_output(snake_case_ ) lowercase =model( pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) comm_check_on_output(snake_case_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): UpperCamelCase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () UpperCamelCase__ = ( {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def _A( self ): lowercase =MaskFormerModelTester(self ) lowercase =ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def _A( self ): self.config_tester.run_common_tests() def _A( self ): lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def _A( self ): lowercase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def _A( self ): pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def _A( self ): pass @unittest.skip(reason='''MaskFormer is not a generative model''' ) def _A( self ): pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def _A( self ): pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def _A( self ): pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _A( self ): pass def _A( self ): lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase =model_class(snake_case_ ) lowercase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase =[*signature.parameters.keys()] 
lowercase =['''pixel_values'''] self.assertListEqual(arg_names[:1] , snake_case_ ) @slow def _A( self ): for model_name in ["facebook/maskformer-swin-small-coco"]: lowercase =MaskFormerModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def _A( self ): lowercase =(self.model_tester.min_size,) * 2 lowercase ={ '''pixel_values''': torch.randn((2, 3, *size) , device=snake_case_ ), '''mask_labels''': torch.randn((2, 10, *size) , device=snake_case_ ), '''class_labels''': torch.zeros(2 , 10 , device=snake_case_ ).long(), } lowercase =MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ ) lowercase =model(**snake_case_ ) self.assertTrue(outputs.loss is not None ) def _A( self ): lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def _A( self ): lowercase , lowercase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase =model_class(snake_case_ ).to(snake_case_ ) lowercase =model(**snake_case_ , output_attentions=snake_case_ ) self.assertTrue(outputs.attentions is not None ) def _A( self ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowercase =self.all_model_classes[1] lowercase , lowercase , lowercase , lowercase , lowercase =self.model_tester.prepare_config_and_inputs() lowercase =model_class(snake_case_ ) model.to(snake_case_ ) model.train() lowercase =model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss loss.backward() def _A( self ): # only MaskFormerForInstanceSegmentation has the loss lowercase =self.all_model_classes[1] lowercase , lowercase , lowercase , lowercase , lowercase =self.model_tester.prepare_config_and_inputs() lowercase =True lowercase =True lowercase =model_class(snake_case_ ) model.to(snake_case_ ) model.train() lowercase =model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) lowercase =outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowercase =outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowercase =outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowercase =outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=snake_case_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) _UpperCAmelCase : List[Any] = 1e-4 def UpperCamelCase ( ) -> Optional[Any]: '''simple docstring''' lowercase =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class __magic_name__ ( unittest.TestCase ): @cached_property def _A( self ): return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def _A( self ): lowercase =MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(snake_case_ ) lowercase =self.default_image_processor lowercase =prepare_img() lowercase =image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ ) lowercase =inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 
and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_00, 10_88) ) with torch.no_grad(): lowercase =model(**snake_case_ ) lowercase =torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) lowercase =torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) lowercase =torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def _A( self ): lowercase =( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(snake_case_ ) .eval() ) lowercase =self.default_image_processor lowercase =prepare_img() lowercase =image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ ) lowercase =inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_00, 10_88) ) with torch.no_grad(): lowercase =model(**snake_case_ ) # masks_queries_logits lowercase =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowercase =[ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] lowercase =torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits lowercase =outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowercase =torch.tensor( [ [1.6_512E00, -5.2_572E00, -3.3_519E00], [3.6_169E-02, -5.9_025E00, -2.9_313E00], [1.0_766E-04, -7.7_630E00, -5.1_263E00], ] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def _A( self ): lowercase =( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(snake_case_ ) .eval() ) lowercase =self.default_image_processor lowercase =prepare_img() lowercase =image_processor(snake_case_ , return_tensors='''pt''' ).to(snake_case_ ) lowercase =inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_00, 10_88) ) with torch.no_grad(): lowercase =model(**snake_case_ ) # masks_queries_logits lowercase =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowercase =[[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] lowercase =torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # 
class_queries_logits lowercase =outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowercase =torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def _A( self ): lowercase =( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(snake_case_ ) .eval() ) lowercase =self.default_image_processor lowercase =image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , ) lowercase =inputs['''pixel_values'''].to(snake_case_ ) lowercase =[el.to(snake_case_ ) for el in inputs['''mask_labels''']] lowercase =[el.to(snake_case_ ) for el in inputs['''class_labels''']] with torch.no_grad(): lowercase =model(**snake_case_ ) self.assertTrue(outputs.loss is not None )
72
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = 42 lowerCamelCase_ = jnp.floataa lowerCamelCase_ = True def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" super().setup() SCREAMING_SNAKE_CASE__ = nn.Dense(5 , dtype=self.dtype ) def __call__( self :List[Any] , *__A :int , **__A :Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super().__call__(*__A , **__A ) SCREAMING_SNAKE_CASE__ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): def cross_entropy(UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str]=None ): SCREAMING_SNAKE_CASE__ = logits.shape[-1] SCREAMING_SNAKE_CASE__ = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" ) SCREAMING_SNAKE_CASE__ = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 ) SCREAMING_SNAKE_CASE__ = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: SCREAMING_SNAKE_CASE__ = reduction(UpperCamelCase__ ) return loss SCREAMING_SNAKE_CASE__ = partial(UpperCamelCase__ , reduction=jnp.mean ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class UpperCamelCase_ : lowerCamelCase_ = "google/bigbird-roberta-base" lowerCamelCase_ = 30_00 lowerCamelCase_ = 1_05_00 lowerCamelCase_ = 1_28 lowerCamelCase_ = 3 lowerCamelCase_ = 1 lowerCamelCase_ = 5 # tx_args lowerCamelCase_ = 3e-5 lowerCamelCase_ = 0.0 lowerCamelCase_ = 2_00_00 lowerCamelCase_ = 0.0095 lowerCamelCase_ = "bigbird-roberta-natural-questions" lowerCamelCase_ = "training-expt" lowerCamelCase_ = "data/nq-training.jsonl" lowerCamelCase_ = "data/nq-validation.jsonl" def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" os.makedirs(self.base_dir , exist_ok=__A ) SCREAMING_SNAKE_CASE__ = os.path.join(self.base_dir , self.save_dir ) SCREAMING_SNAKE_CASE__ = self.batch_size_per_device * jax.device_count() @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 40_96 # no dynamic padding on TPUs def __call__( self :Optional[Any] , __A :Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.collate_fn(__A ) SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(__A , __A ) return batch def _snake_case ( self :List[Any] , __A :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.fetch_inputs(features["""input_ids"""] ) SCREAMING_SNAKE_CASE__ = { 
"""input_ids""": jnp.array(__A , dtype=jnp.intaa ), """attention_mask""": jnp.array(__A , dtype=jnp.intaa ), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ), } return batch def _snake_case ( self :Tuple , __A :list ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self._fetch_inputs(__A ) for ids in input_ids] return zip(*__A ) def _snake_case ( self :List[str] , __A :list ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [1 for _ in range(len(__A ) )] while len(__A ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any]=None ): if seed is not None: SCREAMING_SNAKE_CASE__ = dataset.shuffle(seed=UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) // batch_size ): SCREAMING_SNAKE_CASE__ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(UpperCamelCase__ ) @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] , **UpperCamelCase__: Optional[int] ): def loss_fn(UpperCamelCase__: List[Any] ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs return state.loss_fn( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = jax.random.split(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.value_and_grad(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = grad_fn(state.params ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean(UpperCamelCase__ , """batch""" ) SCREAMING_SNAKE_CASE__ = state.apply_gradients(grads=UpperCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , **UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs SCREAMING_SNAKE_CASE__ = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class UpperCamelCase_ ( train_state.TrainState ): lowerCamelCase_ = struct.field(pytree_node=UpperCamelCase__ ) @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = None def _snake_case ( self :List[Any] , __A :str , __A :str , __A :str , __A 
:Tuple=None ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = model.params SCREAMING_SNAKE_CASE__ = TrainState.create( apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , ) if ckpt_dir is not None: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = restore_checkpoint(__A , __A ) SCREAMING_SNAKE_CASE__ = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = build_tx(**__A ) SCREAMING_SNAKE_CASE__ = train_state.TrainState( step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , ) SCREAMING_SNAKE_CASE__ = args SCREAMING_SNAKE_CASE__ = data_collator SCREAMING_SNAKE_CASE__ = lr SCREAMING_SNAKE_CASE__ = params SCREAMING_SNAKE_CASE__ = jax_utils.replicate(__A ) return state def _snake_case ( self :Optional[Any] , __A :Optional[int] , __A :int , __A :int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.args SCREAMING_SNAKE_CASE__ = len(__A ) // args.batch_size SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ = jax.random.split(__A , jax.device_count() ) for epoch in range(args.max_epochs ): SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , args.batch_size , seed=__A ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc=f'''Running EPOCH-{epoch}''' ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.train_step_fn(__A , __A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 if i % args.logging_steps == 0: SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(state.step ) SCREAMING_SNAKE_CASE__ = running_loss.item() / i SCREAMING_SNAKE_CASE__ = self.scheduler_fn(state_step - 1 ) SCREAMING_SNAKE_CASE__ = self.evaluate(__A , __A ) SCREAMING_SNAKE_CASE__ = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(__A ) ) self.logger.log(__A , commit=__A ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__A ) def _snake_case ( self :List[str] , __A :Dict , __A :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , self.args.batch_size ) SCREAMING_SNAKE_CASE__ = len(__A ) // self.args.batch_size SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ = self.val_step_fn(__A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 return running_loss / i def _snake_case ( self :List[Any] , __A :Any , __A :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(__A ) print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... 
""" ) self.model_save_fn(__A , params=state.params ) with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) ) joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) ) with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f: json.dump({"""step""": state.step.item()} , __A ) print("""DONE""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ ) with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.params , f.read() ) with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.opt_state , f.read() ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) ) with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f: SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = num_train_steps - warmup_steps SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ): def weight_decay_mask(UpperCamelCase__: Any ): SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ ) return tx, lr
6
0
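The training script above builds its question-answering loss from one-hot labels and a log-softmax over the logits. A NumPy sketch of the same cross-entropy computation, with random stand-in logits and labels:

import numpy as np

def cross_entropy(logits: np.ndarray, labels: np.ndarray) -> float:
    """Mean cross-entropy from integer labels, mirroring the one-hot formulation above."""
    vocab_size = logits.shape[-1]
    one_hot = (labels[..., None] == np.arange(vocab_size)[None]).astype("f4")
    shifted = logits - logits.max(axis=-1, keepdims=True)            # numerically stable
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
    return float((-(one_hot * log_probs).sum(axis=-1)).mean())

rng = np.random.default_rng(0)
logits = rng.normal(size=(4, 10))            # batch of 4, 10 classes
labels = rng.integers(0, 10, size=(4,))
print(cross_entropy(logits, labels))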
def solution() -> int:
    """Return a*b*c for the Pythagorean triplet with a + b + c == 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
73
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return the activation module matching `act_fn`."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
6
0
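A quick usage check for the activation helper above, assuming it is in scope under the name `get_activation` used in the cleaned-up listing:

import torch

act = get_activation("silu")          # defined above
x = torch.linspace(-2, 2, 5)
print(act(x))                         # SiLU applied elementwise

try:
    get_activation("tanh")            # not in the supported list
except ValueError as err:
    print(err)                        # Unsupported activation function: tanh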
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
74
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) SCREAMING_SNAKE_CASE__ = MaskFormerConfig(backbone_config=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 847 SCREAMING_SNAKE_CASE__ = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 150 SCREAMING_SNAKE_CASE__ = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 171 SCREAMING_SNAKE_CASE__ = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO SCREAMING_SNAKE_CASE__ = 133 SCREAMING_SNAKE_CASE__ = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 19 SCREAMING_SNAKE_CASE__ = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 65 SCREAMING_SNAKE_CASE__ = """mapillary-vistas-id2label.json""" SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) 
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", 
"""model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) 
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] ): SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = val def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[-dim :] # fmt: on def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): # fmt: off SCREAMING_SNAKE_CASE__ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] 
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # fmt: on def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: bool = False ): SCREAMING_SNAKE_CASE__ = get_maskformer_config(UpperCamelCase__ ) # load original state_dict with open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config ) read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # update to torch tensors for key, value in state_dict.items(): SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase__ ) # load 🤗 model SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(UpperCamelCase__ ) model.eval() for name, param in model.named_parameters(): print(UpperCamelCase__ , param.shape ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(UpperCamelCase__ ) == 0, f'''Unexpected keys: {unexpected_keys}''' # verify results SCREAMING_SNAKE_CASE__ = prepare_img() if "vistas" in model_name: SCREAMING_SNAKE_CASE__ = 65 elif "cityscapes" in model_name: SCREAMING_SNAKE_CASE__ = 65_535 else: SCREAMING_SNAKE_CASE__ = 255 SCREAMING_SNAKE_CASE__ = True if """ade""" in model_name else False SCREAMING_SNAKE_CASE__ = MaskFormerImageProcessor(ignore_index=UpperCamelCase__ , reduce_labels=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = image_processor(UpperCamelCase__ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase__ ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": SCREAMING_SNAKE_CASE__ = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(f'''nielsr/{model_name}''' ) image_processor.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( 
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowerCamelCase = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
6
0
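The conversion script above migrates checkpoint keys by popping each old key and reinserting its tensor under the new name. A minimal sketch of that pattern on a toy state dict; the two keys are taken from the rename list above, while the tensors are dummies:

import torch

# Toy state dict standing in for the original MaskFormer checkpoint.
state_dict = {
    "backbone.patch_embed.proj.weight": torch.zeros(4, 3, 2, 2),
    "backbone.patch_embed.proj.bias": torch.zeros(4),
}

rename_keys = [
    ("backbone.patch_embed.proj.weight",
     "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight"),
    ("backbone.patch_embed.proj.bias",
     "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias"),
]

for src, dest in rename_keys:
    state_dict[dest] = state_dict.pop(src)   # same tensor, new key

print(sorted(state_dict))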
'''simple docstring'''
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its alphabet position (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of `encode`: map positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
75
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { 'nielsr/canine-s': 2048, } # Unicode defines 1,114,112 total “codepoints” _lowerCamelCase = 1114112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _lowerCamelCase = 0 _lowerCamelCase = 0XE0_00 _lowerCamelCase = 0XE0_01 _lowerCamelCase = 0XE0_02 _lowerCamelCase = 0XE0_03 _lowerCamelCase = 0XE0_04 # Maps special codepoints to human-readable names. _lowerCamelCase = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self :str , __A :str=chr(__A ) , __A :str=chr(__A ) , __A :Dict=chr(__A ) , __A :str=chr(__A ) , __A :Union[str, Any]=chr(__A ) , __A :str=chr(__A ) , __A :int=False , __A :int=2048 , **__A :Dict , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , model_max_length=__A , **__A , ) # Creates a mapping for looking up the IDs of special symbols. SCREAMING_SNAKE_CASE__ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): SCREAMING_SNAKE_CASE__ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
SCREAMING_SNAKE_CASE__ = { codepoint: name for name, codepoint in self._special_codepoints.items() } SCREAMING_SNAKE_CASE__ = UNICODE_VOCAB_SIZE SCREAMING_SNAKE_CASE__ = len(self._special_codepoints ) @property def _snake_case ( self :Optional[Any] ) -> int: """simple docstring""" return self._unicode_vocab_size def _snake_case ( self :Tuple , __A :str ) -> List[str]: """simple docstring""" return list(__A ) def _snake_case ( self :Optional[Any] , __A :str ) -> int: """simple docstring""" try: return ord(__A ) except TypeError: raise ValueError(f'''invalid token: \'{token}\'''' ) def _snake_case ( self :str , __A :int ) -> str: """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(__A ) except TypeError: raise ValueError(f'''invalid id: {index}''' ) def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Any: """simple docstring""" return "".join(__A ) def _snake_case ( self :Optional[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) SCREAMING_SNAKE_CASE__ = [1] + ([0] * len(__A )) + [1] if token_ids_a is not None: result += ([0] * len(__A )) + [1] return result def _snake_case ( self :List[str] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def _snake_case ( self :int , __A :str , __A :Optional[str] = None ) -> Any: """simple docstring""" return ()
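# Minimal sketch of the mapping this character-level tokenizer implements:
# ordinary characters pass straight through ord()/chr(), while the special
# tokens occupy the private-use codepoints defined above.
#
#   >>> ord("h"), chr(104)
#   (104, 'h')
#   >>> hex(0XE0_00), 0XE0_00  # the CLS codepoint and its integer id
#   ('0xe000', 57344)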
6
0
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a_ = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 
'doc_id', 'start_index', 'end_index', 'text'] ) a_ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) a_ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. 
If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(snake_case ) class UpperCAmelCase_ : def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) elif titles is None or texts is None: __lowercase : int = titles if texts is None else texts return super().__call__( UpperCamelCase_ , UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __lowercase : Optional[int] = titles if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [titles] __lowercase : Optional[int] = texts if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [texts] __lowercase : str = len(UpperCamelCase_ ) __lowercase : List[Any] = questions if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [questions] * n_passages if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError( F"""There should be as many titles than texts but got {len(UpperCamelCase_ )} titles and {len(UpperCamelCase_ )} texts.""" ) __lowercase : int = super().__call__(UpperCamelCase_ , UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : List[Any] = super().__call__(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : Optional[Any] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCamelCase_ , UpperCamelCase_ ) ] } if return_attention_mask is not False: __lowercase : str = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase : List[str] = attention_mask return self.pad(UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 16 , UpperCamelCase_ = 64 , UpperCamelCase_ = 4 , ) -> 
List[DPRSpanPrediction]: __lowercase : List[Any] = reader_input['''input_ids'''] __lowercase ,__lowercase ,__lowercase : List[str] = reader_output[:3] __lowercase : Optional[int] = len(UpperCamelCase_ ) __lowercase : Any = sorted(range(UpperCamelCase_ ) , reverse=UpperCamelCase_ , key=relevance_logits.__getitem__ ) __lowercase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __lowercase : Any = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase : Optional[Any] = sequence_ids.index(self.pad_token_id ) else: __lowercase : List[Any] = len(UpperCamelCase_ ) __lowercase : List[str] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase_ , top_spans=UpperCamelCase_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase_ , start_index=UpperCamelCase_ , end_index=UpperCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(UpperCamelCase_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[DPRSpanPrediction]: __lowercase : Tuple = [] for start_index, start_score in enumerate(UpperCamelCase_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase : int = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[1] , reverse=UpperCamelCase_ ) __lowercase : Optional[Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" ) __lowercase : Any = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(UpperCamelCase_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(snake_case ) class UpperCAmelCase_ ( snake_case , snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase =["input_ids", "attention_mask"]
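# Standalone sketch of the span-selection logic implemented above: rank every
# (start, end) candidate by start_logit + end_logit, then keep the top spans
# while skipping any span nested inside (or containing) an already-chosen one.
# The toy logits are illustrative, not model outputs.
def pick_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_score + e_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen


print(pick_best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]))  # -> [(1, 2), (0, 0)]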
76
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) _lowerCamelCase = 'bert-base-cased' _lowerCamelCase = 'fp16' _lowerCamelCase = 'bf16' _lowerCamelCase = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Optional[Any] ) -> Optional[Any]: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def _snake_case ( self :List[Any] ) -> Tuple: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = f'''{i + 1}''' SCREAMING_SNAKE_CASE__ = strategy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def _snake_case ( self :int ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = prefetch_policy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def _snake_case ( self :List[str] ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = state_dict_type with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _snake_case ( self :str ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A ) for policy in FSDP_AUTO_WRAP_POLICY: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = policy if policy == "TRANSFORMER_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """BertLayer""" elif policy == "SIZE_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """2000""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP""" 
SCREAMING_SNAKE_CASE__ = """T5Layer""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() with self.assertRaises(__A ) as cm: fsdp_plugin.set_auto_wrap_policy(__A ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP""" SCREAMING_SNAKE_CASE__ = """0""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _snake_case ( self :Optional[Any] ) -> Optional[int]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = mp_dtype with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = Accelerator() if mp_dtype == "fp16": SCREAMING_SNAKE_CASE__ = torch.floataa elif mp_dtype == "bf16": SCREAMING_SNAKE_CASE__ = torch.bfloataa SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , __A ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__A ) def _snake_case ( self :str ) -> str: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = str(__A ).lower() with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) ) @require_fsdp @require_multi_gpu @slow class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Any ) -> Any: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = 0.8_2 SCREAMING_SNAKE_CASE__ = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] SCREAMING_SNAKE_CASE__ = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def _snake_case ( self :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" ) SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: SCREAMING_SNAKE_CASE__ = cmd.copy() for i, strategy in enumerate(__A ): if strategy.lower() in config: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--performance_lower_bound={self.performance_lower_bound}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = cmd.copy() cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) if strategy != "FULL_SHARD": continue SCREAMING_SNAKE_CASE__ = len(__A ) for state_dict_type in FSDP_STATE_DICT_TYPE: SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index] cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) SCREAMING_SNAKE_CASE__ = cmd_config[:-1] SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ f'''--resume_from_checkpoint={resume_from_checkpoint}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): SCREAMING_SNAKE_CASE__ = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, 
strategy in enumerate(__A ): if strategy.lower() in spec: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--peak_memory_upper_bound={peak_mem_upper_bound}''', f'''--n_train={self.n_train}''', f'''--n_val={self.n_val}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() )
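# Sketch of the environment-driven configuration pattern these tests exercise:
# accelerate's FSDP plugin reads FSDP_* environment variables at construction
# time, so the tests only have to mock the environment. The values below are
# illustrative.
import os

os.environ["ACCELERATE_USE_FSDP"] = "true"
os.environ["FSDP_SHARDING_STRATEGY"] = "1"  # 1 -> FULL_SHARD
os.environ["FSDP_BACKWARD_PREFETCH"] = "BACKWARD_PRE"
os.environ["FSDP_OFFLOAD_PARAMS"] = "false"

from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin

fsdp_plugin = FullyShardedDataParallelPlugin()  # picks the settings up from os.environ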
6
0
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase ) -> float: """simple docstring""" __UpperCAmelCase : List[Any] = 0 while len(UpperCamelCase ) > 1: __UpperCAmelCase : Optional[int] = 0 # Consider two files with minimum cost to be merged for _ in range(2 ): __UpperCAmelCase : Dict = files.index(min(UpperCamelCase ) ) temp += files[min_index] files.pop(UpperCamelCase ) files.append(UpperCamelCase ) optimal_merge_cost += temp return optimal_merge_cost if __name__ == "__main__": import doctest doctest.testmod()
77
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _lowerCamelCase = logging.get_logger(__name__) # General docstring _lowerCamelCase = 'PoolFormerConfig' # Base docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = [1, 512, 7, 7] # Image classification docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = 'tabby, tabby cat' _lowerCamelCase = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ): if drop_prob == 0.0 or not training: return input SCREAMING_SNAKE_CASE__ = 1 - drop_prob SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor return output class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = drop_prob def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor: """simple docstring""" return drop_path(__A , self.drop_prob , self.training ) def _snake_case ( self :Dict ) -> str: """simple docstring""" return "p={}".format(self.drop_prob ) class UpperCamelCase_ ( nn.Module ): def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size) SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride) SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A ) SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity() def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.projection(__A ) SCREAMING_SNAKE_CASE__ = self.norm(__A ) return embeddings class UpperCamelCase_ ( nn.GroupNorm ): def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict: """simple docstring""" super().__init__(1 , __A , **__A ) class UpperCamelCase_ ( nn.Module ): def __init__( self :List[str] , __A :Optional[int] ) -> Any: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A ) def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]: """simple docstring""" return self.pool(__A ) - hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , 
__A :Any ) -> str: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if isinstance(config.hidden_act , __A ): SCREAMING_SNAKE_CASE__ = ACTaFN[config.hidden_act] else: SCREAMING_SNAKE_CASE__ = config.hidden_act def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.act_fn(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) # Useful for training neural nets SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity() SCREAMING_SNAKE_CASE__ = config.use_layer_scale if config.use_layer_scale: SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str: """simple docstring""" if self.use_layer_scale: SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = () SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs else: SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) ) # First residual connection SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states SCREAMING_SNAKE_CASE__ = () # Second residual connection inside the PoolFormerOutput block SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) ) SCREAMING_SNAKE_CASE__ = hidden_states + layer_output SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs class UpperCamelCase_ ( nn.Module ): def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = config # stochastic depth decay rule SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings SCREAMING_SNAKE_CASE__ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) # Transformer blocks SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers 
SCREAMING_SNAKE_CASE__ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__A ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None SCREAMING_SNAKE_CASE__ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers # Get patch embeddings from hidden_states SCREAMING_SNAKE_CASE__ = embedding_layer(__A ) # Send the embeddings through the blocks for _, blk in enumerate(__A ): SCREAMING_SNAKE_CASE__ = blk(__A ) SCREAMING_SNAKE_CASE__ = layer_outputs[0] if output_hidden_states: SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A ) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PoolFormerConfig lowerCamelCase_ = "poolformer" lowerCamelCase_ = "pixel_values" lowerCamelCase_ = True def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict: """simple docstring""" if isinstance(__A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__A , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any: """simple docstring""" if isinstance(__A , __A ): SCREAMING_SNAKE_CASE__ = value _lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
, UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Any ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A ) # Initialize weights and apply final processing self.post_init() def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) SCREAMING_SNAKE_CASE__ = self.encoder( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , ) class UpperCamelCase_ ( nn.Module ): def __init__( self :int , __A :Optional[int] ) -> Tuple: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size ) def _snake_case ( self :List[Any] , __A :Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.dense(__A ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :str , __A :Union[str, Any] ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config.num_labels SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A ) # Final norm SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head SCREAMING_SNAKE_CASE__ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE__ = self.poolformer( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = outputs[0] SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) ) SCREAMING_SNAKE_CASE__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): SCREAMING_SNAKE_CASE__ = 
"""single_label_classification""" else: SCREAMING_SNAKE_CASE__ = """multi_label_classification""" if self.config.problem_type == "regression": SCREAMING_SNAKE_CASE__ = MSELoss() if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) elif self.config.problem_type == "single_label_classification": SCREAMING_SNAKE_CASE__ = CrossEntropyLoss() SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss() SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) if not return_dict: SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
6
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: List[str] ={ 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __A ( UpperCamelCase__ ): a__ : Optional[Any] = """sew-d""" def __init__(self : Union[str, Any] , __a : int=32 , __a : int=768 , __a : int=12 , __a : str=12 , __a : List[Any]=3072 , __a : Optional[Any]=2 , __a : Optional[Any]=512 , __a : Tuple=256 , __a : Optional[int]=True , __a : int=True , __a : Dict=("p2c", "c2p") , __a : Dict="layer_norm" , __a : Tuple="gelu_python" , __a : List[str]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=0.1 , __a : Union[str, Any]=0.0 , __a : Any=0.1 , __a : Any=0.02 , __a : int=1E-7 , __a : Tuple=1E-5 , __a : Union[str, Any]="group" , __a : str="gelu" , __a : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __a : Optional[int]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a : int=False , __a : List[Any]=128 , __a : List[Any]=16 , __a : Optional[Any]=True , __a : Dict=0.05 , __a : List[Any]=10 , __a : Any=2 , __a : Tuple=0.0 , __a : Dict=10 , __a : Tuple=0 , __a : Optional[Any]="mean" , __a : Optional[int]=False , __a : Dict=False , __a : Any=256 , __a : str=0 , __a : Optional[Any]=1 , __a : Any=2 , **__a : Any , ): super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a ) UpperCAmelCase_ = hidden_size UpperCAmelCase_ = feat_extract_norm UpperCAmelCase_ = feat_extract_activation UpperCAmelCase_ = list(__a ) UpperCAmelCase_ = list(__a ) UpperCAmelCase_ = list(__a ) UpperCAmelCase_ = conv_bias UpperCAmelCase_ = num_conv_pos_embeddings UpperCAmelCase_ = num_conv_pos_embedding_groups UpperCAmelCase_ = len(self.conv_dim ) UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = squeeze_factor UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = position_buckets UpperCAmelCase_ = share_att_key UpperCAmelCase_ = relative_attention UpperCAmelCase_ = norm_rel_ebd UpperCAmelCase_ = list(__a ) UpperCAmelCase_ = hidden_act UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = hidden_dropout UpperCAmelCase_ = attention_dropout UpperCAmelCase_ = activation_dropout UpperCAmelCase_ = feat_proj_dropout UpperCAmelCase_ = final_dropout UpperCAmelCase_ = layer_norm_eps UpperCAmelCase_ = feature_layer_norm_eps UpperCAmelCase_ = initializer_range UpperCAmelCase_ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." 
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase_ = apply_spec_augment UpperCAmelCase_ = mask_time_prob UpperCAmelCase_ = mask_time_length UpperCAmelCase_ = mask_time_min_masks UpperCAmelCase_ = mask_feature_prob UpperCAmelCase_ = mask_feature_length UpperCAmelCase_ = mask_feature_min_masks # ctc loss UpperCAmelCase_ = ctc_loss_reduction UpperCAmelCase_ = ctc_zero_infinity # sequence classification UpperCAmelCase_ = use_weighted_layer_sum UpperCAmelCase_ = classifier_proj_size @property def _lowercase (self : str ): return functools.reduce(operator.mul , self.conv_stride , 1 )
78
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_lengths SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = gelu_activation SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings SCREAMING_SNAKE_CASE__ = causal SCREAMING_SNAKE_CASE__ = asm SCREAMING_SNAKE_CASE__ = n_langs SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = n_special SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = summary_type SCREAMING_SNAKE_CASE__ = use_proj SCREAMING_SNAKE_CASE__ = scope def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_input_lengths: SCREAMING_SNAKE_CASE__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float() SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) 
SCREAMING_SNAKE_CASE__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self :List[str] ) -> Optional[int]: """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, 
model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE__ = model(__A )
        SCREAMING_SNAKE_CASE__ = model(__A , labels=__A )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.num_labels
        SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.num_choices
        SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A )
        model.to(__A )
        model.eval()
        SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        SCREAMING_SNAKE_CASE__ = model(
            __A , attention_mask=__A , token_type_ids=__A , labels=__A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _snake_case ( self :Union[str, Any] ) -> Any:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
        ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs
        SCREAMING_SNAKE_CASE__ = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """lengths""": input_lengths,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict


@require_torch
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    lowerCamelCase_ = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    lowerCamelCase_ = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def _snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str:
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
            return True

        return False

    def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A )

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                SCREAMING_SNAKE_CASE__ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__A )
                SCREAMING_SNAKE_CASE__ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__A )

        return inputs_dict

    def _snake_case ( self :str ) -> Optional[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self )
        SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 )

    def _snake_case ( self :int ) -> int:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def _snake_case ( self :Optional[Any] ) -> List[str]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*__A )

    def _snake_case ( self :Tuple ) -> List[str]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*__A )

    def _snake_case ( self :str ) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*__A )

    def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*__A )

    def _snake_case ( self :str ) -> Optional[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*__A )

    def _snake_case ( self :Any ) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*__A )

    def _snake_case ( self :Any ) -> Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*__A )

    @slow
    def _snake_case ( self :Union[str, Any] ) -> List[str]:
        """simple docstring"""
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A )
            self.assertIsNotNone(__A )

    @slow
    @require_torch_gpu
    def _snake_case ( self :Tuple ) -> List[str]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            SCREAMING_SNAKE_CASE__ = True
            SCREAMING_SNAKE_CASE__ = model_class(config=__A )
            SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A )
            SCREAMING_SNAKE_CASE__ = torch.jit.trace(
                __A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) )
                SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A )
                loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) )


@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
    @slow
    def _snake_case ( self :Dict ) -> Any:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
        SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__ = model(__A )[0]
        SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , __A )
        SCREAMING_SNAKE_CASE__ = torch.tensor(
            [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )

        self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
6
0
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)

SCREAMING_SNAKE_CASE__ : Any = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class UpperCAmelCase_ ( __lowerCamelCase ):
    __lowerCamelCase = 'informer'
    __lowerCamelCase = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "student_t" , _lowerCAmelCase = "nll" , _lowerCAmelCase = 1 , _lowerCAmelCase = None , _lowerCAmelCase = "mean" , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 64 , _lowerCAmelCase = 32 , _lowerCAmelCase = 32 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = 2 , _lowerCAmelCase = True , _lowerCAmelCase = "gelu" , _lowerCAmelCase = 0.0_5 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 100 , _lowerCAmelCase = 0.0_2 , _lowerCAmelCase=True , _lowerCAmelCase = "prob" , _lowerCAmelCase = 5 , _lowerCAmelCase = True , **_lowerCAmelCase , ):
        # time series specific configuration
        UpperCAmelCase__ : List[str] = prediction_length
        UpperCAmelCase__ : Optional[Any] = context_length or prediction_length
        UpperCAmelCase__ : str = distribution_output
        UpperCAmelCase__ : int = loss
        UpperCAmelCase__ : Optional[Any] = input_size
        UpperCAmelCase__ : Any = num_time_features
        UpperCAmelCase__ : int = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        UpperCAmelCase__ : Union[str, Any] = scaling
        UpperCAmelCase__ : Optional[Any] = num_dynamic_real_features
        UpperCAmelCase__ : List[str] = num_static_real_features
        UpperCAmelCase__ : str = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(_lowerCAmelCase ) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            UpperCAmelCase__ : List[str] = cardinality
        else:
            UpperCAmelCase__ : Optional[Any] = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(_lowerCAmelCase ) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            UpperCAmelCase__ : str = embedding_dimension
        else:
            UpperCAmelCase__ : List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]

        UpperCAmelCase__ : Union[str, Any] = num_parallel_samples

        # Transformer architecture configuration
        UpperCAmelCase__ : Dict = input_size * len(self.lags_sequence ) + self._number_of_features
        UpperCAmelCase__ : Any = d_model
        UpperCAmelCase__ : int = encoder_attention_heads
        UpperCAmelCase__ : Optional[Any] = decoder_attention_heads
        UpperCAmelCase__ : int = encoder_ffn_dim
        UpperCAmelCase__ : Tuple = decoder_ffn_dim
        UpperCAmelCase__ : List[Any] = encoder_layers
        UpperCAmelCase__ : Optional[Any] = decoder_layers

        UpperCAmelCase__ : Tuple = dropout
        UpperCAmelCase__ : int = attention_dropout
        UpperCAmelCase__ : List[str] = activation_dropout
        UpperCAmelCase__ : Any = encoder_layerdrop
        UpperCAmelCase__ : Union[str, Any] = decoder_layerdrop

        UpperCAmelCase__ : Tuple = activation_function
        UpperCAmelCase__ : Dict = init_std

        UpperCAmelCase__ : str = use_cache

        # Informer
        UpperCAmelCase__ : Union[str, Any] = attention_type
        UpperCAmelCase__ : int = sampling_factor
        UpperCAmelCase__ : Any = distil

        super().__init__(is_encoder_decoder=_lowerCAmelCase , **_lowerCAmelCase )

    @property
    def __UpperCAmelCase ( self ):
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
79
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] ):
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple=True ):
    model.train()
    SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(UpperCamelCase__ )


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[Any]=False ):
    set_seed(42 )
    SCREAMING_SNAKE_CASE__ = RegressionModel()
    SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 )
    SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=1e-3 )
        SCREAMING_SNAKE_CASE__ = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 )
        SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 )
    # Make a copy of `model`
    if sched:
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    else:
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ):
    # Test when on a single CPU or GPU that the context manager does nothing
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
    # Use a single batch
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(UpperCamelCase__ ):
                step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else:
            # Sync grads
            step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad
            ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ):
    # Test on distributed setup that context manager behaves properly
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
    # Use a single batch
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(UpperCamelCase__ ):
                step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else:
            # Sync grads
            step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ):
    SCREAMING_SNAKE_CASE__ = Accelerator(
        split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
    for iteration, batch in enumerate(UpperCamelCase__ ):
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values()
        # Gather the distributed inputs and targs for the base model
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(UpperCamelCase__ ):
            step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
    GradientState._reset_state()


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ):
    SCREAMING_SNAKE_CASE__ = Accelerator(
        split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ )
    for iteration, batch in enumerate(UpperCamelCase__ ):
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values()
        # Gather the distributed inputs and targs for the base model
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(UpperCamelCase__ ):
            step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
        SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ ))
        if accelerator.num_processes > 1:
            check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
    GradientState._reset_state()


def SCREAMING_SNAKE_CASE__ ( ):
    SCREAMING_SNAKE_CASE__ = Accelerator()
    SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 )
    SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
    SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 )
    SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(UpperCamelCase__ ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
        if iteration < len(UpperCamelCase__ ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(UpperCamelCase__ ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
                    if batch_num < len(UpperCamelCase__ ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def SCREAMING_SNAKE_CASE__ ( ):
    SCREAMING_SNAKE_CASE__ = Accelerator()
    SCREAMING_SNAKE_CASE__ = accelerator.state
    if state.local_process_index == 0:
        print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(UpperCamelCase__ )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(UpperCamelCase__ )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation, """ ,
                        f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ )

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,
                """`split_batches=False`, `dispatch_batches=False`**""" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ ,
                        f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ )


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
6
0
import math


def snake_case ( lowerCamelCase ):
    '''simple docstring'''
    __lowercase = []
    __lowercase = 2
    __lowercase = int(math.sqrt(lowerCamelCase ) )  # Size of every segment
    __lowercase = [True] * (end + 1)
    __lowercase = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(lowerCamelCase )
            for i in range(start * start , end + 1 , lowerCamelCase ):
                __lowercase = False
        start += 1
    prime += in_prime

    __lowercase = end + 1
    __lowercase = min(2 * end , lowerCamelCase )

    while low <= n:
        __lowercase = [True] * (high - low + 1)
        for each in in_prime:
            __lowercase = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(lowerCamelCase , high + 1 , lowerCamelCase ):
                __lowercase = False
        for j in range(len(lowerCamelCase ) ):
            if temp[j] is True:
                prime.append(j + low )
        __lowercase = high + 1
        __lowercase = min(high + end , lowerCamelCase )
    return prime


print(sieve(10**6))
80
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class UpperCamelCase_ ( UpperCamelCase__ ):
    lowerCamelCase_ = ["image_processor", "tokenizer"]
    lowerCamelCase_ = "AutoImageProcessor"
    lowerCamelCase_ = "AutoTokenizer"

    def __init__( self :Optional[int] , __A :Optional[Any] , __A :Dict ) -> Dict:
        """simple docstring"""
        super().__init__(__A , __A )
        SCREAMING_SNAKE_CASE__ = self.image_processor

    def __call__( self :int , __A :str=None , __A :int=None , __A :Union[str, Any]=None , **__A :str ) -> Optional[Any]:
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )

        if text is not None:
            SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , return_tensors=__A , **__A )

        if images is not None:
            SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )

        if text is not None and images is not None:
            SCREAMING_SNAKE_CASE__ = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__A ) , tensor_type=__A )

    def _snake_case ( self :str , *__A :List[str] , **__A :List[str] ) -> List[Any]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*__A , **__A )

    def _snake_case ( self :List[str] , *__A :Any , **__A :Any ) -> Tuple:
        """simple docstring"""
        return self.tokenizer.decode(*__A , **__A )

    @property
    def _snake_case ( self :Dict ) -> List[Any]:
        """simple docstring"""
        return ["input_ids", "attention_mask", "pixel_values"]
6
0
# Algorithm for the pigeonhole sorting
def lowerCAmelCase_ ( __lowerCamelCase ):
    __snake_case : Optional[Any] = min(__lowerCamelCase )  # min() finds the minimum value
    __snake_case : Any = max(__lowerCamelCase )  # max() finds the maximum value
    __snake_case : List[Any] = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    __snake_case : Optional[Any] = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(__lowerCamelCase , __lowerCamelCase ), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    __snake_case : List[Any] = 0
    for count in range(__lowerCamelCase ):
        while holes[count] > 0:
            holes[count] -= 1
            __snake_case : Optional[Any] = count + min_val
            i += 1


def lowerCAmelCase_ ( ):
    __snake_case : int = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(__lowerCamelCase )
    print("Sorted order is:" , " ".join(__lowerCamelCase ) )


if __name__ == "__main__":
    main()
81
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
    SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = sum(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = [[False for x in range(s + 1 )] for y in range(n + 1 )]

    for i in range(1 , n + 1 ):
        SCREAMING_SNAKE_CASE__ = True

    for i in range(1 , s + 1 ):
        SCREAMING_SNAKE_CASE__ = False

    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            SCREAMING_SNAKE_CASE__ = dp[i][j - 1]

            if arr[i - 1] <= j:
                SCREAMING_SNAKE_CASE__ = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            SCREAMING_SNAKE_CASE__ = s - 2 * j
            break

    return diff
6
0
"""simple docstring""" import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments lowerCamelCase = logging.getLogger(__name__) @dataclass class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = field( default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} ) UpperCamelCase = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to SortishSamler or not.'''} ) UpperCamelCase = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) UpperCamelCase = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''whether to use adafactor'''} ) UpperCamelCase = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} ) UpperCamelCase = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} ) UpperCamelCase = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} ) UpperCamelCase = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} ) UpperCamelCase = field( default='''linear''' , metadata={'''help''': F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
82
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float ):
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""" )
    return 0.5 * mass * abs(UpperCamelCase__ ) * abs(UpperCamelCase__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
6
0
"""simple docstring""" import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class __snake_case ( unittest.TestCase): @slow def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Any = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) _lowerCamelCase : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _lowerCamelCase : Union[str, Any] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids _lowerCamelCase : Optional[int] = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids _lowerCamelCase : List[Any] = shift_tokens_right(__lowerCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id ) _lowerCamelCase : int = model(__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits _lowerCamelCase : Optional[Any] = optax.softmax_cross_entropy(__lowerCAmelCase , onehot(__lowerCAmelCase , logits.shape[-1] ) ).mean() _lowerCamelCase : Dict = -(labels.shape[-1] * loss.item()) _lowerCamelCase : Dict = -84.91_27 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
83
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


_lowerCamelCase = logging.get_logger(__name__)


class UpperCamelCase_ ( UpperCamelCase__ ):
    lowerCamelCase_ = "encoder-decoder"
    lowerCamelCase_ = True

    def __init__( self :Optional[int] , **__A :str ) -> int:
        """simple docstring"""
        super().__init__(**__A )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        SCREAMING_SNAKE_CASE__ = kwargs.pop("""encoder""" )
        SCREAMING_SNAKE_CASE__ = encoder_config.pop("""model_type""" )
        SCREAMING_SNAKE_CASE__ = kwargs.pop("""decoder""" )
        SCREAMING_SNAKE_CASE__ = decoder_config.pop("""model_type""" )

        from ..auto.configuration_auto import AutoConfig

        SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A )
        SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A )
        SCREAMING_SNAKE_CASE__ = True

    @classmethod
    def _snake_case ( cls :str , __A :PretrainedConfig , __A :PretrainedConfig , **__A :List[str] ) -> PretrainedConfig:
        """simple docstring"""
        logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        SCREAMING_SNAKE_CASE__ = True
        SCREAMING_SNAKE_CASE__ = True

        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__A )

    def _snake_case ( self :str ) -> Union[str, Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
        SCREAMING_SNAKE_CASE__ = self.encoder.to_dict()
        SCREAMING_SNAKE_CASE__ = self.decoder.to_dict()
        SCREAMING_SNAKE_CASE__ = self.__class__.model_type
        return output
6
0
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
84
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=UpperCamelCase__ )
class UpperCamelCase_ ( UpperCamelCase__ ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    lowerCamelCase_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    lowerCamelCase_ = Features({"text": Value("string" )} )
    lowerCamelCase_ = Features({"labels": ClassLabel} )
    lowerCamelCase_ = "text"
    lowerCamelCase_ = "labels"

    def _snake_case ( self :Any , __A :Dict ) -> Optional[Any]:
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , __A ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        SCREAMING_SNAKE_CASE__ = copy.deepcopy(self )
        SCREAMING_SNAKE_CASE__ = self.label_schema.copy()
        SCREAMING_SNAKE_CASE__ = features[self.label_column]
        SCREAMING_SNAKE_CASE__ = label_schema
        return task_template

    @property
    def _snake_case ( self :str ) -> Dict[str, str]:
        """simple docstring"""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
6
0
import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class snake_case ( UpperCamelCase_ ):
    lowercase_ = (DPMSolverSDEScheduler,)
    lowercase_ = 10

    def __lowercase( self : Any , **a_ : List[Any] )-> Dict:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : List[str] = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'noise_sampler_seed': 0,
        }

        config.update(**a_ )
        return config

    def __lowercase( self : int )-> str:
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=a_ )

    def __lowercase( self : Any )-> Tuple:
        """simple docstring"""
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=a_ , beta_end=a_ )

    def __lowercase( self : List[Any] )-> Tuple:
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=a_ )

    def __lowercase( self : List[Any] )-> Union[str, Any]:
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=a_ )

    def __lowercase( self : Union[str, Any] )-> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ : str = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler_class(**a_ )

        scheduler.set_timesteps(self.num_inference_steps )

        SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_model()
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
        SCREAMING_SNAKE_CASE__ : Optional[int] = sample.to(a_ )

        for i, t in enumerate(scheduler.timesteps ):
            SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler.scale_model_input(a_ , a_ )

            SCREAMING_SNAKE_CASE__ : Optional[int] = model(a_ , a_ )

            SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.step(a_ , a_ , a_ )
            SCREAMING_SNAKE_CASE__ : Optional[int] = output.prev_sample

        SCREAMING_SNAKE_CASE__ : Dict = torch.sum(torch.abs(a_ ) )
        SCREAMING_SNAKE_CASE__ : Any = torch.mean(torch.abs(a_ ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3

    def __lowercase( self : Optional[int] )-> Union[str, Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : str = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ : Any = self.get_scheduler_config(prediction_type='v_prediction' )
        SCREAMING_SNAKE_CASE__ : List[Any] = scheduler_class(**a_ )

        scheduler.set_timesteps(self.num_inference_steps )

        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_model()
        SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        SCREAMING_SNAKE_CASE__ : Optional[int] = sample.to(a_ )

        for i, t in enumerate(scheduler.timesteps ):
            SCREAMING_SNAKE_CASE__ : List[Any] = scheduler.scale_model_input(a_ , a_ )

            SCREAMING_SNAKE_CASE__ : Any = model(a_ , a_ )

            SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(a_ , a_ , a_ )
            SCREAMING_SNAKE_CASE__ : List[Any] = output.prev_sample

        SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sum(torch.abs(a_ ) )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.mean(torch.abs(a_ ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1e-3

    def __lowercase( self : str )-> int:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Any = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE__ : int = scheduler_class(**a_ )

        scheduler.set_timesteps(self.num_inference_steps , device=a_ )

        SCREAMING_SNAKE_CASE__ : str = self.dummy_model()
        SCREAMING_SNAKE_CASE__ : Dict = self.dummy_sample_deter.to(a_ ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler.scale_model_input(a_ , a_ )

            SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , a_ )

            SCREAMING_SNAKE_CASE__ : Any = scheduler.step(a_ , a_ , a_ )
            SCREAMING_SNAKE_CASE__ : Optional[Any] = output.prev_sample

        SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(a_ ) )
        SCREAMING_SNAKE_CASE__ : Any = torch.mean(torch.abs(a_ ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1e-3

    def __lowercase( self : str )-> Dict:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ : Any = self.scheduler_classes[0]
        SCREAMING_SNAKE_CASE__ : Any = self.get_scheduler_config()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler_class(**a_ , use_karras_sigmas=a_ )

        scheduler.set_timesteps(self.num_inference_steps , device=a_ )

        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_model()
        SCREAMING_SNAKE_CASE__ : List[Any] = self.dummy_sample_deter.to(a_ ) * scheduler.init_noise_sigma
        SCREAMING_SNAKE_CASE__ : Optional[int] = sample.to(a_ )

        for t in scheduler.timesteps:
            SCREAMING_SNAKE_CASE__ : Tuple = scheduler.scale_model_input(a_ , a_ )

            SCREAMING_SNAKE_CASE__ : str = model(a_ , a_ )

            SCREAMING_SNAKE_CASE__ : List[Any] = scheduler.step(a_ , a_ , a_ )
            SCREAMING_SNAKE_CASE__ : Tuple = output.prev_sample

        SCREAMING_SNAKE_CASE__ : List[Any] = torch.sum(torch.abs(a_ ) )
        SCREAMING_SNAKE_CASE__ : str = torch.mean(torch.abs(a_ ) )

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1e-2
85
import argparse

import torch
from datasets import load_dataset
from donut import DonutModel

from transformers import (
    DonutImageProcessor,
    DonutProcessor,
    DonutSwinConfig,
    DonutSwinModel,
    MBartConfig,
    MBartForCausalLM,
    VisionEncoderDecoderModel,
    XLMRobertaTokenizerFast,
)


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
    SCREAMING_SNAKE_CASE__ = model.config
    SCREAMING_SNAKE_CASE__ = DonutSwinConfig(
        image_size=original_config.input_size ,
        patch_size=4 ,
        depths=original_config.encoder_layer ,
        num_heads=[4, 8, 16, 32] ,
        window_size=original_config.window_size ,
        embed_dim=128 , )
    SCREAMING_SNAKE_CASE__ = MBartConfig(
        is_decoder=UpperCamelCase__ ,
        is_encoder_decoder=UpperCamelCase__ ,
        add_cross_attention=UpperCamelCase__ ,
        decoder_layers=original_config.decoder_layer ,
        max_position_embeddings=original_config.max_position_embeddings ,
        vocab_size=len(
            model.decoder.tokenizer ) ,
        scale_embedding=UpperCamelCase__ ,
        add_final_layer_norm=UpperCamelCase__ , )

    return encoder_config, decoder_config


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
    if "encoder.model" in name:
        SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" )
    if "decoder.model" in name:
        SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" )
    if "patch_embed.proj" in name:
        SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
    if name.startswith("""encoder""" ):
        if "layers" in name:
            SCREAMING_SNAKE_CASE__ = """encoder.""" + name
        if "attn.proj" in name:
            SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" )
        if "attn" in name and "mask" not in name:
            SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" )
        if "norm1" in name:
            SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" )
        if "norm2" in name:
            SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" )
        if "mlp.fc1" in name:
            SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
        if "mlp.fc2" in name:
            SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" )

        if name == "encoder.norm.weight":
            SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight"""
        if name == "encoder.norm.bias":
            SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias"""

    return name


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ):
    for key in orig_state_dict.copy().keys():
        SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ )

        if "qkv" in key:
            SCREAMING_SNAKE_CASE__ = key.split(""".""" )
            SCREAMING_SNAKE_CASE__ = int(key_split[3] )
            SCREAMING_SNAKE_CASE__ = int(key_split[5] )
            SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                SCREAMING_SNAKE_CASE__ = val[:dim, :]
                SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :]
                SCREAMING_SNAKE_CASE__ = val[-dim:, :]
            else:
                SCREAMING_SNAKE_CASE__ = val[:dim]
                SCREAMING_SNAKE_CASE__ = val[dim : dim * 2]
                SCREAMING_SNAKE_CASE__ = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            SCREAMING_SNAKE_CASE__ = val

    return orig_state_dict


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: str=False ):
    # load original model
    SCREAMING_SNAKE_CASE__ = DonutModel.from_pretrained(UpperCamelCase__ ).eval()

    # load HuggingFace model
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_configs(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = DonutSwinModel(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
    model.eval()

    SCREAMING_SNAKE_CASE__ = original_model.state_dict()
    SCREAMING_SNAKE_CASE__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
    model.load_state_dict(UpperCamelCase__ )

    # verify results on scanned document
    SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/example-documents""" )
    SCREAMING_SNAKE_CASE__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )

    SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    SCREAMING_SNAKE_CASE__ = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        SCREAMING_SNAKE_CASE__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
        SCREAMING_SNAKE_CASE__ = """When is the coffee break?"""
        SCREAMING_SNAKE_CASE__ = task_prompt.replace("""{user_input}""" , UpperCamelCase__ )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        SCREAMING_SNAKE_CASE__ = """<s_rvlcdip>"""
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        SCREAMING_SNAKE_CASE__ = """<s_cord>"""
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        SCREAMING_SNAKE_CASE__ = """s_cord-v2>"""
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        SCREAMING_SNAKE_CASE__ = """<s_zhtrainticket>"""
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        SCREAMING_SNAKE_CASE__ = """hello world"""
    else:
        raise ValueError("""Model name not supported""" )
    SCREAMING_SNAKE_CASE__ = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[
        """input_ids"""
    ]

    SCREAMING_SNAKE_CASE__ = original_model.encoder.model.patch_embed(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.encoder.embeddings(UpperCamelCase__ )
    assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )

    # verify encoder hidden states
    SCREAMING_SNAKE_CASE__ = original_model.encoder(UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = model.encoder(UpperCamelCase__ ).last_hidden_state
    assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )

    # verify decoder hidden states
    SCREAMING_SNAKE_CASE__ = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits
    SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
    assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
    print("""Looks ok!""" )

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(UpperCamelCase__ )
        processor.save_pretrained(UpperCamelCase__ )

    if push_to_hub:
        model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
        processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )


if __name__ == "__main__":
    _lowerCamelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='naver-clova-ix/donut-base-finetuned-docvqa',
        required=False,
        type=str,
        help='Name of the original model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, required=False, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether or not to push the converted model and processor to the 🤗 hub.',
    )

    _lowerCamelCase = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
6
0
def __snake_case ( __UpperCamelCase : int = 100 ):
    """simple docstring"""
    A_ = n * (n + 1) * (2 * n + 1) / 6
    A_ = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )


if __name__ == "__main__":
    print(F"{solution() = }")
86
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
    def _snake_case ( self :Union[str, Any] ) -> List[str]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self :Any ) -> Optional[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
        SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
        sd_pipe.set_progress_bar_config(disable=__A )

        sd_pipe.set_scheduler("""sample_euler""" )

        SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
        SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )

        SCREAMING_SNAKE_CASE__ = output.images

        SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE__ = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _snake_case ( self :str ) -> Union[str, Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
        SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
        sd_pipe.set_progress_bar_config(disable=__A )

        sd_pipe.set_scheduler("""sample_euler""" )

        SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
        SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )

        SCREAMING_SNAKE_CASE__ = output.images

        SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE__ = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1

    def _snake_case ( self :Tuple ) -> Optional[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
        SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
        sd_pipe.set_progress_bar_config(disable=__A )

        sd_pipe.set_scheduler("""sample_dpmpp_2m""" )

        SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
        SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ = sd_pipe(
            [prompt] ,
            generator=__A ,
            guidance_scale=7.5 ,
            num_inference_steps=15 ,
            output_type="""np""" ,
            use_karras_sigmas=__A , )

        SCREAMING_SNAKE_CASE__ = output.images

        SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        SCREAMING_SNAKE_CASE__ = np.array(
            [0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
6
0
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


_lowerCamelCase : Any = logging.get_logger(__name__)


class UpperCamelCase_ ( UpperCAmelCase__ ):
    '''simple docstring'''

    def __init__( self : Any , *UpperCAmelCase__ : Optional[int] , **UpperCAmelCase__ : Tuple) ->None:
        '''simple docstring'''
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''' , UpperCAmelCase__ , )
        super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__)
87
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int = 600_851_475_143 ):
    try:
        SCREAMING_SNAKE_CASE__ = int(UpperCamelCase__ )
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""" )
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""" )
    SCREAMING_SNAKE_CASE__ = 1
    SCREAMING_SNAKE_CASE__ = 2
    while i * i <= n:
        while n % i == 0:
            SCREAMING_SNAKE_CASE__ = i
            n //= i
        i += 1
    if n > 1:
        SCREAMING_SNAKE_CASE__ = n
    return int(UpperCamelCase__ )


if __name__ == "__main__":
    print(F'''{solution() = }''')
6
0
"""simple docstring""" def _snake_case ( __snake_case : str ): """simple docstring""" _lowerCamelCase : Any = 0 # if input_string is "aba" than new_input_string become "a|b|a" _lowerCamelCase : List[Any] = """""" _lowerCamelCase : Union[str, Any] = """""" # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(__snake_case ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring _lowerCamelCase , _lowerCamelCase : Optional[int] = 0, 0 # length[i] shows the length of palindromic substring with center i _lowerCamelCase : List[str] = [1 for i in range(len(__snake_case ) )] # for each character in new_string find corresponding palindromic string _lowerCamelCase : Optional[int] = 0 for j in range(len(__snake_case ) ): _lowerCamelCase : Dict = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(__snake_case ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 _lowerCamelCase : Union[str, Any] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: _lowerCamelCase : str = j - k + 1 # noqa: E741 _lowerCamelCase : Tuple = j + k - 1 # update max_length and start position if max_length < length[j]: _lowerCamelCase : Optional[int] = length[j] _lowerCamelCase : List[str] = j # create that string _lowerCamelCase : Optional[int] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
88
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class UpperCamelCase_ ( unittest.TestCase ):
    def _snake_case ( self :Tuple ) -> Any:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """safety_checker/pytorch_model.bin""",
            """safety_checker/model.safetensors""",
            """vae/diffusion_pytorch_model.bin""",
            """vae/diffusion_pytorch_model.safetensors""",
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        self.assertTrue(is_safetensors_compatible(__A ) )

    def _snake_case ( self :List[str] ) -> int:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        self.assertTrue(is_safetensors_compatible(__A ) )

    def _snake_case ( self :Tuple ) -> Optional[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """safety_checker/pytorch_model.bin""",
            """safety_checker/model.safetensors""",
            """vae/diffusion_pytorch_model.bin""",
            """vae/diffusion_pytorch_model.safetensors""",
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
            """unet/diffusion_pytorch_model.bin""",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(__A ) )

    def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
        ]
        self.assertTrue(is_safetensors_compatible(__A ) )

    def _snake_case ( self :Optional[Any] ) -> Any:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """safety_checker/pytorch_model.bin""",
            """safety_checker/model.safetensors""",
            """vae/diffusion_pytorch_model.bin""",
            """vae/diffusion_pytorch_model.safetensors""",
            """text_encoder/pytorch_model.bin""",
            # Removed: 'text_encoder/model.safetensors',
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        self.assertFalse(is_safetensors_compatible(__A ) )

    def _snake_case ( self :Tuple ) -> List[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """safety_checker/pytorch_model.fp16.bin""",
            """safety_checker/model.fp16.safetensors""",
            """vae/diffusion_pytorch_model.fp16.bin""",
            """vae/diffusion_pytorch_model.fp16.safetensors""",
            """text_encoder/pytorch_model.fp16.bin""",
            """text_encoder/model.fp16.safetensors""",
            """unet/diffusion_pytorch_model.fp16.bin""",
            """unet/diffusion_pytorch_model.fp16.safetensors""",
        ]
        SCREAMING_SNAKE_CASE__ = """fp16"""
        self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )

    def _snake_case ( self :Any ) -> Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """unet/diffusion_pytorch_model.fp16.bin""",
            """unet/diffusion_pytorch_model.fp16.safetensors""",
        ]
        SCREAMING_SNAKE_CASE__ = """fp16"""
        self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )

    def _snake_case ( self :str ) -> Tuple:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        SCREAMING_SNAKE_CASE__ = """fp16"""
        self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )

    def _snake_case ( self :List[Any] ) -> List[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """safety_checker/pytorch_model.fp16.bin""",
            """safety_checker/model.fp16.safetensors""",
            """vae/diffusion_pytorch_model.fp16.bin""",
            """vae/diffusion_pytorch_model.fp16.safetensors""",
            """text_encoder/pytorch_model.fp16.bin""",
            """text_encoder/model.fp16.safetensors""",
            """unet/diffusion_pytorch_model.fp16.bin""",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        SCREAMING_SNAKE_CASE__ = """fp16"""
        self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )

    def _snake_case ( self :str ) -> List[str]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """text_encoder/pytorch_model.fp16.bin""",
            """text_encoder/model.fp16.safetensors""",
        ]
        SCREAMING_SNAKE_CASE__ = """fp16"""
        self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )

    def _snake_case ( self :Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
        ]
        SCREAMING_SNAKE_CASE__ = """fp16"""
        self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )

    def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [
            """safety_checker/pytorch_model.fp16.bin""",
            """safety_checker/model.fp16.safetensors""",
            """vae/diffusion_pytorch_model.fp16.bin""",
            """vae/diffusion_pytorch_model.fp16.safetensors""",
            """text_encoder/pytorch_model.fp16.bin""",
            # 'text_encoder/model.fp16.safetensors',
            """unet/diffusion_pytorch_model.fp16.bin""",
            """unet/diffusion_pytorch_model.fp16.safetensors""",
        ]
        SCREAMING_SNAKE_CASE__ = """fp16"""
        self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
6
0
SCREAMING_SNAKE_CASE : Tuple = "0.21.0"

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)


if is_rich_available():
    from .utils import rich
89
import argparse
import datetime


def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
    SCREAMING_SNAKE_CASE__ = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }
    SCREAMING_SNAKE_CASE__ = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(UpperCamelCase__ ) < 11:
        raise ValueError("""Must be 10 characters long""" )

    # Get month
    SCREAMING_SNAKE_CASE__ = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""" )

    SCREAMING_SNAKE_CASE__ = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )

    # Get day
    SCREAMING_SNAKE_CASE__ = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""" )

    # Get second separator
    SCREAMING_SNAKE_CASE__ = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )

    # Get year
    SCREAMING_SNAKE_CASE__ = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""" )

    # Get datetime obj for validation
    SCREAMING_SNAKE_CASE__ = datetime.date(int(UpperCamelCase__ ) , int(UpperCamelCase__ ) , int(UpperCamelCase__ ) )

    # Start math
    if m <= 2:
        SCREAMING_SNAKE_CASE__ = y - 1
        SCREAMING_SNAKE_CASE__ = m + 12
    # maths var
    SCREAMING_SNAKE_CASE__ = int(str(UpperCamelCase__ )[:2] )
    SCREAMING_SNAKE_CASE__ = int(str(UpperCamelCase__ )[2:] )
    SCREAMING_SNAKE_CASE__ = int(2.6 * m - 5.3_9 )
    SCREAMING_SNAKE_CASE__ = int(c / 4 )
    SCREAMING_SNAKE_CASE__ = int(k / 4 )
    SCREAMING_SNAKE_CASE__ = int(d + k )
    SCREAMING_SNAKE_CASE__ = int(t + u + v + x )
    SCREAMING_SNAKE_CASE__ = int(z - (2 * c) )
    SCREAMING_SNAKE_CASE__ = round(w % 7 )
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )

    # Response
    SCREAMING_SNAKE_CASE__ = f'''Your date {date_input}, is a {days[str(UpperCamelCase__ )]}!'''
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    _lowerCamelCase = argparse.ArgumentParser(
        description=(
            'Find out what day of the week nearly any date is or was. Enter '
            'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
        )
    )
    parser.add_argument(
        'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
    )
    _lowerCamelCase = parser.parse_args()
    zeller(args.date_input)
6
0
'''simple docstring'''
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f"{solution() = }")
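Usage sketch for the solver above (104743 is the well-known Project Euler 7 answer for the default nth=10001):

print(solution(6))  # 13, the sixth prime: 2, 3, 5, 7, 11, 13
print(solution())   # 104743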
90
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
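A toy run of the counting logic above, bypassing argparse and pickle so it can be checked by eye:

from collections import Counter

data = [[5, 7, 5], [7, 7, 9]]  # two tokenized sequences
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)
counts = [0] * 10
for k, v in counter.items():
    counts[k] = v
print(counts)  # [0, 0, 0, 0, 0, 2, 0, 3, 0, 1]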
6
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
6
0
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed UpperCamelCase_ = { """distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), """roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), """bert""": (BertConfig, BertForMaskedLM, BertTokenizer), """gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _lowerCAmelCase ( __magic_name__ : List[Any] ) -> List[Any]: assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Optional[int] ) -> Tuple: if args.student_type == "roberta": lowercase : List[Any] =False elif args.student_type == "gpt2": lowercase : Dict =False def _lowerCAmelCase ( __magic_name__ : Optional[int] , __magic_name__ : int ) -> str: if args.student_type == "roberta": lowercase : str =False def _lowerCAmelCase ( ) -> List[Any]: lowercase : int =argparse.ArgumentParser(description='''Training''' ) parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' ) parser.add_argument( '''--dump_path''' , type=__magic_name__ , required=__magic_name__ , help='''The output directory (log, checkpoints, parameters, etc.)''' ) parser.add_argument( '''--data_file''' , type=__magic_name__ , required=__magic_name__ , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , ) parser.add_argument( '''--student_type''' , type=__magic_name__ , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=__magic_name__ , help='''The student type (DistilBERT, RoBERTa).''' , ) parser.add_argument('''--student_config''' , type=__magic_name__ , required=__magic_name__ , help='''Path to the student configuration.''' ) parser.add_argument( '''--student_pretrained_weights''' , default=__magic_name__ , type=__magic_name__ , help='''Load student initialization checkpoint.''' ) parser.add_argument( '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=__magic_name__ , help='''Teacher type (BERT, RoBERTa).''' ) parser.add_argument('''--teacher_name''' , type=__magic_name__ , required=__magic_name__ , help='''The teacher 
model.''' ) parser.add_argument('''--temperature''' , default=2.0 , type=__magic_name__ , help='''Temperature for the softmax temperature.''' ) parser.add_argument( '''--alpha_ce''' , default=0.5 , type=__magic_name__ , help='''Linear weight for the distillation loss. Must be >=0.''' ) parser.add_argument( '''--alpha_mlm''' , default=0.0 , type=__magic_name__ , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , ) parser.add_argument('''--alpha_clm''' , default=0.5 , type=__magic_name__ , help='''Linear weight for the CLM loss. Must be >=0.''' ) parser.add_argument('''--alpha_mse''' , default=0.0 , type=__magic_name__ , help='''Linear weight of the MSE loss. Must be >=0.''' ) parser.add_argument( '''--alpha_cos''' , default=0.0 , type=__magic_name__ , help='''Linear weight of the cosine embedding loss. Must be >=0.''' ) parser.add_argument( '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' ) parser.add_argument( '''--mlm_mask_prop''' , default=0.1_5 , type=__magic_name__ , help='''Proportion of tokens for which we need to make a prediction.''' , ) parser.add_argument('''--word_mask''' , default=0.8 , type=__magic_name__ , help='''Proportion of tokens to mask out.''' ) parser.add_argument('''--word_keep''' , default=0.1 , type=__magic_name__ , help='''Proportion of tokens to keep.''' ) parser.add_argument('''--word_rand''' , default=0.1 , type=__magic_name__ , help='''Proportion of tokens to randomly replace.''' ) parser.add_argument( '''--mlm_smoothing''' , default=0.7 , type=__magic_name__ , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , ) parser.add_argument('''--token_counts''' , type=__magic_name__ , help='''The token counts in the data_file for MLM.''' ) parser.add_argument( '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , ) parser.add_argument( '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , ) parser.add_argument( '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , ) parser.add_argument('''--n_epoch''' , type=__magic_name__ , default=3 , help='''Number of pass on the whole dataset.''' ) parser.add_argument('''--batch_size''' , type=__magic_name__ , default=5 , help='''Batch size (for each process).''' ) parser.add_argument( '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. 
Default is true.''' , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=__magic_name__ , default=50 , help='''Gradient accumulation for larger training batches.''' , ) parser.add_argument('''--warmup_prop''' , default=0.0_5 , type=__magic_name__ , help='''Linear warmup proportion.''' ) parser.add_argument('''--weight_decay''' , default=0.0 , type=__magic_name__ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--learning_rate''' , default=5E-4 , type=__magic_name__ , help='''The initial learning rate for Adam.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=__magic_name__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , default=5.0 , type=__magic_name__ , help='''Max gradient norm.''' ) parser.add_argument('''--initializer_range''' , default=0.0_2 , type=__magic_name__ , help='''Random initialization range.''' ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=__magic_name__ , default='''O1''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_gpu''' , type=__magic_name__ , default=1 , help='''Number of GPUs in the node.''' ) parser.add_argument('''--local_rank''' , type=__magic_name__ , default=-1 , help='''Distributed training - Local rank''' ) parser.add_argument('''--seed''' , type=__magic_name__ , default=56 , help='''Random seed''' ) parser.add_argument('''--log_interval''' , type=__magic_name__ , default=500 , help='''Tensorboard logging interval.''' ) parser.add_argument('''--checkpoint_interval''' , type=__magic_name__ , default=4000 , help='''Checkpoint interval.''' ) lowercase : Dict =parser.parse_args() sanity_checks(__magic_name__ ) # ARGS # init_gpu_params(__magic_name__ ) set_seed(__magic_name__ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( f'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ''' itUse `--force` if you want to overwrite it''' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(f'''Param: {args}''' ) with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f: json.dump(vars(__magic_name__ ) , __magic_name__ , indent=4 ) git_log(args.dump_path ) lowercase , lowercase , lowercase : Optional[int] =MODEL_CLASSES[args.student_type] lowercase , lowercase , lowercase : str =MODEL_CLASSES[args.teacher_type] # TOKENIZER # lowercase : Optional[Any] =teacher_tokenizer_class.from_pretrained(args.teacher_name ) lowercase : Tuple ={} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): lowercase : List[str] =tokenizer.all_special_tokens.index(__magic_name__ ) lowercase : int =tokenizer.all_special_ids[idx] logger.info(f'''Special tokens {special_tok_ids}''' ) lowercase : Optional[int] =special_tok_ids lowercase : str =tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f'''Loading data from {args.data_file}''' ) with open(args.data_file , '''rb''' ) as fp: lowercase : List[str] =pickle.load(__magic_name__ ) if args.mlm: logger.info(f'''Loading token counts from 
{args.token_counts} (already pre-computed)''' ) with open(args.token_counts , '''rb''' ) as fp: lowercase : Optional[Any] =pickle.load(__magic_name__ ) lowercase : Optional[int] =np.maximum(__magic_name__ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): lowercase : Optional[Any] =0.0 # do not predict special tokens lowercase : Any =torch.from_numpy(__magic_name__ ) else: lowercase : List[str] =None lowercase : int =LmSeqsDataset(params=__magic_name__ , data=__magic_name__ ) logger.info('''Data loader created.''' ) # STUDENT # logger.info(f'''Loading student config from {args.student_config}''' ) lowercase : Optional[int] =student_config_class.from_pretrained(args.student_config ) lowercase : List[Any] =True if args.student_pretrained_weights is not None: logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' ) lowercase : Optional[int] =student_model_class.from_pretrained(args.student_pretrained_weights , config=__magic_name__ ) else: lowercase : int =student_model_class(__magic_name__ ) if args.n_gpu > 0: student.to(f'''cuda:{args.local_rank}''' ) logger.info('''Student loaded.''' ) # TEACHER # lowercase : List[Any] =teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__magic_name__ ) if args.n_gpu > 0: teacher.to(f'''cuda:{args.local_rank}''' ) logger.info(f'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(__magic_name__ , __magic_name__ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(__magic_name__ , __magic_name__ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() lowercase : Optional[Any] =Distiller( params=__magic_name__ , dataset=__magic_name__ , token_probs=__magic_name__ , student=__magic_name__ , teacher=__magic_name__ ) distiller.train() logger.info('''Let\'s go get some drinks.''' ) if __name__ == "__main__": main()
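The trainer above is driven entirely by the argparse flags it defines. A hedged example invocation from a shell; the script, file, and path names are illustrative, while the loss weights satisfy the sanity_checks constraints (MLM mode requires alpha_mlm > 0 and alpha_clm == 0):

python train.py \
    --force \
    --dump_path serialization_dir/my_first_distillation \
    --data_file data/binarized_text.bert-base-uncased.pickle \
    --token_counts data/token_counts.bert-base-uncased.pickle \
    --student_type distilbert \
    --student_config training_configs/distilbert-base-uncased.json \
    --teacher_type bert \
    --teacher_name bert-base-uncased \
    --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_clm 0.0 --alpha_cos 0.33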
92
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = ["image_processor", "tokenizer"] lowerCamelCase_ = "OwlViTImageProcessor" lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __A , ) SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" ) SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__A , __A ) def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple: """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )): SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )] elif isinstance(__A , __A ) and isinstance(text[0] , __A ): SCREAMING_SNAKE_CASE__ = [] # Maximum number of queries across batch SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__A ) != max_num_queries: SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A )) SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A ) encodings.append(__A ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) SCREAMING_SNAKE_CASE__ = BatchEncoding() SCREAMING_SNAKE_CASE__ = input_ids SCREAMING_SNAKE_CASE__ = attention_mask if 
query_images is not None: SCREAMING_SNAKE_CASE__ = BatchEncoding() SCREAMING_SNAKE_CASE__ = self.image_processor( __A , return_tensors=__A , **__A ).pixel_values SCREAMING_SNAKE_CASE__ = query_pixel_values if images is not None: SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif query_images is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def _snake_case ( self :List[Any] , *__A :Dict , **__A :Dict ) -> Optional[int]: """simple docstring""" return self.image_processor.post_process(*__A , **__A ) def _snake_case ( self :Optional[int] , *__A :Dict , **__A :List[str] ) -> Optional[Any]: """simple docstring""" return self.image_processor.post_process_object_detection(*__A , **__A ) def _snake_case ( self :str , *__A :List[str] , **__A :Union[str, Any] ) -> Any: """simple docstring""" return self.image_processor.post_process_image_guided_detection(*__A , **__A ) def _snake_case ( self :Dict , *__A :List[str] , **__A :List[str] ) -> int: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _snake_case ( self :Dict , *__A :Dict , **__A :List[str] ) -> str: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _snake_case ( self :List[Any] ) -> Optional[int]: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , ) return self.image_processor_class @property def _snake_case ( self :Any ) -> Optional[Any]: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , ) return self.image_processor
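A hedged usage sketch of the processor above through its public transformers name; the checkpoint and image URL are the usual documentation examples, not fixed requirements:

import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs["input_ids"].shape)  # one row per padded text query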
6
0
"""simple docstring""" def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return number | (1 << position) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return number & ~(1 << position) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return number ^ (1 << position) def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->bool: """simple docstring""" return ((number >> position) & 1) == 1 def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int: """simple docstring""" return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
93
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = 42 class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ): @register_to_config def __init__( self :Union[str, Any] , __A :int = 3 , __A :int = 3 , __A :Tuple[str] = ("DownEncoderBlock2D",) , __A :Tuple[str] = ("UpDecoderBlock2D",) , __A :Tuple[int] = (64,) , __A :int = 1 , __A :str = "silu" , __A :int = 3 , __A :int = 32 , __A :int = 256 , __A :int = 32 , __A :Optional[int] = None , __A :float = 0.1_8_2_1_5 , __A :str = "group" , ) -> Any: """simple docstring""" super().__init__() # pass init params to Encoder SCREAMING_SNAKE_CASE__ = Encoder( in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , ) SCREAMING_SNAKE_CASE__ = vq_embed_dim if vq_embed_dim is not None else latent_channels SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = VectorQuantizer(__A , __A , beta=0.2_5 , remap=__A , sane_index_shape=__A ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) # pass init params to Decoder SCREAMING_SNAKE_CASE__ = Decoder( in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , ) @apply_forward_hook def _snake_case ( self :Union[str, Any] , __A :torch.FloatTensor , __A :bool = True ) -> VQEncoderOutput: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.encoder(__A ) SCREAMING_SNAKE_CASE__ = self.quant_conv(__A ) if not return_dict: return (h,) return VQEncoderOutput(latents=__A ) @apply_forward_hook def _snake_case ( self :Tuple , __A :torch.FloatTensor , __A :bool = False , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" if not force_not_quantize: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.quantize(__A ) else: SCREAMING_SNAKE_CASE__ = h SCREAMING_SNAKE_CASE__ = self.post_quant_conv(__A ) SCREAMING_SNAKE_CASE__ = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=__A ) def _snake_case ( self :int , __A :torch.FloatTensor , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" SCREAMING_SNAKE_CASE__ = sample SCREAMING_SNAKE_CASE__ = self.encode(__A ).latents SCREAMING_SNAKE_CASE__ = self.decode(__A ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__A )
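The class above mirrors diffusers' VQModel; a round-trip sketch under that assumption, using the default config so no downsampling happens and shapes are preserved:

import torch
from diffusers import VQModel

model = VQModel()  # defaults shown in the signature above: 3-channel, 32x32 sample size
sample = torch.randn(1, 3, 32, 32)
latents = model.encode(sample).latents          # VQEncoderOutput.latents
reconstruction = model.decode(latents).sample   # quantized, then decoded
print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])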
6
0
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class UpperCAmelCase_ ( __A ): """simple docstring""" UpperCamelCase_ = '''dandelin/vilt-b32-finetuned-vqa''' UpperCamelCase_ = ( '''This is a tool that answers a question about an image. It takes an input named `image` which should be the ''' '''image containing the information, as well as a `question` which should be the question in English. It ''' '''returns a text that is the answer to the question.''' ) UpperCamelCase_ = '''image_qa''' UpperCamelCase_ = AutoProcessor UpperCamelCase_ = AutoModelForVisualQuestionAnswering UpperCamelCase_ = ['''image''', '''text'''] UpperCamelCase_ = ['''text'''] def __init__( self : List[Any] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Dict ) -> Dict: '''simple docstring''' requires_backends(self , ['''vision'''] ) super().__init__(*UpperCAmelCase , **UpperCAmelCase ) def A__ ( self : Tuple , UpperCAmelCase : "Image" , UpperCAmelCase : str ) -> Any: '''simple docstring''' return self.pre_processor(UpperCAmelCase , UpperCAmelCase , return_tensors='''pt''' ) def A__ ( self : int , UpperCAmelCase : Optional[int] ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): return self.model(**UpperCAmelCase ).logits def A__ ( self : List[Any] , UpperCAmelCase : Dict ) -> Tuple: '''simple docstring''' lowercase : List[str] =outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
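The tool above wraps ViLT visual question answering; the same checkpoint can be exercised directly through a transformers pipeline (the image path is a placeholder):

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
print(vqa(image="cats.png", question="How many cats are there?"))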
94
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = 42 lowerCamelCase_ = jnp.floataa lowerCamelCase_ = True def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" super().setup() SCREAMING_SNAKE_CASE__ = nn.Dense(5 , dtype=self.dtype ) def __call__( self :List[Any] , *__A :int , **__A :Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super().__call__(*__A , **__A ) SCREAMING_SNAKE_CASE__ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): def cross_entropy(UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str]=None ): SCREAMING_SNAKE_CASE__ = logits.shape[-1] SCREAMING_SNAKE_CASE__ = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" ) SCREAMING_SNAKE_CASE__ = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 ) SCREAMING_SNAKE_CASE__ = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: SCREAMING_SNAKE_CASE__ = reduction(UpperCamelCase__ ) return loss SCREAMING_SNAKE_CASE__ = partial(UpperCamelCase__ , reduction=jnp.mean ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class UpperCamelCase_ : lowerCamelCase_ = "google/bigbird-roberta-base" lowerCamelCase_ = 30_00 lowerCamelCase_ = 1_05_00 lowerCamelCase_ = 1_28 lowerCamelCase_ = 3 lowerCamelCase_ = 1 lowerCamelCase_ = 5 # tx_args lowerCamelCase_ = 3e-5 lowerCamelCase_ = 0.0 lowerCamelCase_ = 2_00_00 lowerCamelCase_ = 0.0095 lowerCamelCase_ = "bigbird-roberta-natural-questions" lowerCamelCase_ = "training-expt" lowerCamelCase_ = "data/nq-training.jsonl" lowerCamelCase_ = "data/nq-validation.jsonl" def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" os.makedirs(self.base_dir , exist_ok=__A ) SCREAMING_SNAKE_CASE__ = os.path.join(self.base_dir , self.save_dir ) SCREAMING_SNAKE_CASE__ = self.batch_size_per_device * jax.device_count() @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 40_96 # no dynamic padding on TPUs def __call__( self :Optional[Any] , __A :Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.collate_fn(__A ) SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(__A , __A ) return batch def _snake_case ( self :List[Any] , __A :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.fetch_inputs(features["""input_ids"""] ) SCREAMING_SNAKE_CASE__ = { 
"""input_ids""": jnp.array(__A , dtype=jnp.intaa ), """attention_mask""": jnp.array(__A , dtype=jnp.intaa ), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ), } return batch def _snake_case ( self :Tuple , __A :list ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self._fetch_inputs(__A ) for ids in input_ids] return zip(*__A ) def _snake_case ( self :List[str] , __A :list ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [1 for _ in range(len(__A ) )] while len(__A ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any]=None ): if seed is not None: SCREAMING_SNAKE_CASE__ = dataset.shuffle(seed=UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) // batch_size ): SCREAMING_SNAKE_CASE__ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(UpperCamelCase__ ) @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] , **UpperCamelCase__: Optional[int] ): def loss_fn(UpperCamelCase__: List[Any] ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs return state.loss_fn( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = jax.random.split(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.value_and_grad(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = grad_fn(state.params ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean(UpperCamelCase__ , """batch""" ) SCREAMING_SNAKE_CASE__ = state.apply_gradients(grads=UpperCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , **UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs SCREAMING_SNAKE_CASE__ = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class UpperCamelCase_ ( train_state.TrainState ): lowerCamelCase_ = struct.field(pytree_node=UpperCamelCase__ ) @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = None def _snake_case ( self :List[Any] , __A :str , __A :str , __A :str , __A 
:Tuple=None ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = model.params SCREAMING_SNAKE_CASE__ = TrainState.create( apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , ) if ckpt_dir is not None: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = restore_checkpoint(__A , __A ) SCREAMING_SNAKE_CASE__ = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = build_tx(**__A ) SCREAMING_SNAKE_CASE__ = train_state.TrainState( step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , ) SCREAMING_SNAKE_CASE__ = args SCREAMING_SNAKE_CASE__ = data_collator SCREAMING_SNAKE_CASE__ = lr SCREAMING_SNAKE_CASE__ = params SCREAMING_SNAKE_CASE__ = jax_utils.replicate(__A ) return state def _snake_case ( self :Optional[Any] , __A :Optional[int] , __A :int , __A :int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.args SCREAMING_SNAKE_CASE__ = len(__A ) // args.batch_size SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ = jax.random.split(__A , jax.device_count() ) for epoch in range(args.max_epochs ): SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , args.batch_size , seed=__A ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc=f'''Running EPOCH-{epoch}''' ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.train_step_fn(__A , __A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 if i % args.logging_steps == 0: SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(state.step ) SCREAMING_SNAKE_CASE__ = running_loss.item() / i SCREAMING_SNAKE_CASE__ = self.scheduler_fn(state_step - 1 ) SCREAMING_SNAKE_CASE__ = self.evaluate(__A , __A ) SCREAMING_SNAKE_CASE__ = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(__A ) ) self.logger.log(__A , commit=__A ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__A ) def _snake_case ( self :List[str] , __A :Dict , __A :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , self.args.batch_size ) SCREAMING_SNAKE_CASE__ = len(__A ) // self.args.batch_size SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ = self.val_step_fn(__A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 return running_loss / i def _snake_case ( self :List[Any] , __A :Any , __A :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(__A ) print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... 
""" ) self.model_save_fn(__A , params=state.params ) with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) ) joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) ) with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f: json.dump({"""step""": state.step.item()} , __A ) print("""DONE""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ ) with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.params , f.read() ) with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.opt_state , f.read() ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) ) with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f: SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = num_train_steps - warmup_steps SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ): def weight_decay_mask(UpperCamelCase__: Any ): SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ ) return tx, lr
6
0
"""simple docstring""" from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def snake_case ( A__ ,A__ ,A__ = None ): if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release: # old versions of hfh don't url-encode the file path UpperCAmelCase_ : List[str] = quote(A__ ) return hfh.hf_hub_url(A__ ,A__ ,repo_type="dataset" ,revision=A__ )
95
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
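Usage sketch for the factory above:

import torch

act = get_activation("silu")
print(act(torch.tensor([-1.0, 0.0, 1.0])))  # SiLU applies x * sigmoid(x) elementwise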
6
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy __lowerCamelCase = logging.get_logger(__name__) class __A ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : float , **__snake_case : Optional[Any] ) -> str: __magic_name__: Optional[Any] = feature_size __magic_name__: List[Any] = sampling_rate __magic_name__: Tuple = padding_value __magic_name__: int = kwargs.pop("""padding_side""" , """right""" ) __magic_name__: Optional[Any] = kwargs.pop("""return_attention_mask""" , __snake_case ) super().__init__(**__snake_case ) def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Union[ BatchFeature, List[BatchFeature], Dict[str, BatchFeature], Dict[str, List[BatchFeature]], List[Dict[str, BatchFeature]], ] , __snake_case : Union[bool, str, PaddingStrategy] = True , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[str, TensorType]] = None , ) -> BatchFeature: # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): __magic_name__: Union[str, Any] = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( """You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`""" F' to this method that includes {self.model_input_names[0]}, but you provided' F' {list(processed_features.keys() )}' ) __magic_name__: Any = processed_features[self.model_input_names[0]] __magic_name__: Tuple = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(__snake_case ) == 0: if return_attention_mask: __magic_name__: Optional[Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch __magic_name__: Tuple = required_input[0] if isinstance(__snake_case , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. __magic_name__: str = 0 while len(required_input[index] ) == 0: index += 1 if index < len(__snake_case ): __magic_name__: Optional[int] = required_input[index][0] if return_tensors is None: if is_tf_tensor(__snake_case ): __magic_name__: List[str] = """tf""" elif is_torch_tensor(__snake_case ): __magic_name__: Any = """pt""" elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ): __magic_name__: int = """np""" else: raise ValueError( F'type of {first_element} unknown: {type(__snake_case )}. 
' """Should be one of a python, numpy, pytorch or tensorflow object.""" ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): __magic_name__: List[str] = to_numpy(__snake_case ) else: __magic_name__: Any = [to_numpy(__snake_case ) for v in value] # Convert padding_strategy in PaddingStrategy __magic_name__: str = self._get_padding_strategies(padding=__snake_case , max_length=__snake_case ) __magic_name__: str = processed_features[self.model_input_names[0]] __magic_name__: str = len(__snake_case ) if not all(len(__snake_case ) == batch_size for v in processed_features.values() ): raise ValueError("""Some items in the output dictionary have a different batch size than others.""" ) __magic_name__: List[Any] = [] for i in range(__snake_case ): __magic_name__: List[Any] = {k: v[i] for k, v in processed_features.items()} # truncation __magic_name__: List[Any] = self._truncate( __snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , ) truncated_inputs.append(__snake_case ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length __magic_name__: Union[str, Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) __magic_name__: Union[str, Any] = PaddingStrategy.MAX_LENGTH __magic_name__: List[str] = {} for i in range(__snake_case ): # padding __magic_name__: str = self._pad( truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , ) for key, value in outputs.items(): if key not in batch_outputs: __magic_name__: Optional[Any] = [] if value.dtype is np.dtype(np.floataa ): __magic_name__: Any = value.astype(np.floataa ) batch_outputs[key].append(__snake_case ) return BatchFeature(__snake_case , tensor_type=__snake_case ) def lowerCamelCase__ ( self : Tuple , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ) -> dict: __magic_name__: List[str] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: __magic_name__: List[Any] = len(__snake_case ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __magic_name__: List[str] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __magic_name__: str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length if return_attention_mask and "attention_mask" not in processed_features: __magic_name__: int = np.ones(len(__snake_case ) , dtype=np.intaa ) if needs_to_be_padded: __magic_name__: str = max_length - len(__snake_case ) if self.padding_side == "right": if return_attention_mask: __magic_name__: List[Any] = np.pad( processed_features["""attention_mask"""] , (0, difference) ) __magic_name__: Union[str, Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) __magic_name__: int = np.pad( __snake_case , __snake_case , """constant""" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: __magic_name__: Dict = np.pad( processed_features["""attention_mask"""] , (difference, 0) ) __magic_name__: Optional[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) __magic_name__: int = 
np.pad( __snake_case , __snake_case , """constant""" , constant_values=self.padding_value ) else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return processed_features def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ) -> int: if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" ) __magic_name__: Dict = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): __magic_name__: Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of __magic_name__: Tuple = len(__snake_case ) > max_length if needs_to_be_truncated: __magic_name__: Any = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: __magic_name__: List[Any] = processed_features["""attention_mask"""][:max_length] return processed_features def lowerCamelCase__ ( self : List[Any] , __snake_case : int=False , __snake_case : Tuple=None ) -> Optional[Any]: # Get padding strategy if padding is not False: if padding is True: __magic_name__: Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(__snake_case , __snake_case ): __magic_name__: Tuple = PaddingStrategy(__snake_case ) elif isinstance(__snake_case , __snake_case ): __magic_name__: Dict = padding else: __magic_name__: int = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( """Asking to pad but the feature_extractor does not have a padding value. Please select a value to use""" """ as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" ) return padding_strategy
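A hedged sketch of the pad() entry point above via a concrete subclass; the constructor arguments mirror the base-class __init__ shown here, and the feature values are toy data:

from transformers import Wav2Vec2FeatureExtractor

fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
features = [{"input_values": [0.1, 0.2, 0.3]}, {"input_values": [0.4, 0.5]}]
batch = fe.pad(features, padding=True, return_tensors="np")
print(batch["input_values"].shape)  # (2, 3): the shorter example is padded with 0.0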
96
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) SCREAMING_SNAKE_CASE__ = MaskFormerConfig(backbone_config=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 847 SCREAMING_SNAKE_CASE__ = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 150 SCREAMING_SNAKE_CASE__ = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 171 SCREAMING_SNAKE_CASE__ = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO SCREAMING_SNAKE_CASE__ = 133 SCREAMING_SNAKE_CASE__ = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 19 SCREAMING_SNAKE_CASE__ = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 65 SCREAMING_SNAKE_CASE__ = """mapillary-vistas-id2label.json""" SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) 
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", 
"""model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) 
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] ): SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = val def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[-dim :] # fmt: on def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): # fmt: off SCREAMING_SNAKE_CASE__ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] 
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # fmt: on def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: bool = False ): SCREAMING_SNAKE_CASE__ = get_maskformer_config(UpperCamelCase__ ) # load original state_dict with open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config ) read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # update to torch tensors for key, value in state_dict.items(): SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase__ ) # load 🤗 model SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(UpperCamelCase__ ) model.eval() for name, param in model.named_parameters(): print(UpperCamelCase__ , param.shape ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(UpperCamelCase__ ) == 0, f'''Unexpected keys: {unexpected_keys}''' # verify results SCREAMING_SNAKE_CASE__ = prepare_img() if "vistas" in model_name: SCREAMING_SNAKE_CASE__ = 65 elif "cityscapes" in model_name: SCREAMING_SNAKE_CASE__ = 65_535 else: SCREAMING_SNAKE_CASE__ = 255 SCREAMING_SNAKE_CASE__ = """ade""" in model_name SCREAMING_SNAKE_CASE__ = MaskFormerImageProcessor(ignore_index=UpperCamelCase__ , reduce_labels=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = image_processor(UpperCamelCase__ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase__ ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": SCREAMING_SNAKE_CASE__ = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(f'''nielsr/{model_name}''' ) image_processor.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help='Name of the MaskFormer model you\'d like to convert', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowerCamelCase = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
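For reference, the converter above can also be driven directly from Python rather than the CLI; a minimal sketch, where the checkpoint path is an assumption and must point at an original MaskFormer pickle state dict:

# Hypothetical direct call to the entry point defined above; the arguments are
# positional, matching the __main__ invocation (model name, checkpoint path,
# output directory, push_to_hub).
convert_maskformer_checkpoint(
    "maskformer-swin-tiny-ade",
    "/path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl",  # assumed checkpoint location
    "./maskformer-swin-tiny-ade",
    False,
)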
6
0
import requests def a ( snake_case__: str , snake_case__: str ): '''simple docstring''' lowercase_ = {'''Content-Type''': '''application/json'''} lowercase_ = requests.post(snake_case__ , json={'''text''': message_body} , headers=snake_case__ ) if response.status_code != 200: lowercase_ = ( '''Request to slack returned an error ''' F'''{response.status_code}, the response is:\n{response.text}''' ) raise ValueError(snake_case__ ) if __name__ == "__main__": # Set the slack url to the one provided by Slack when you create the webhook at # https://my.slack.com/services/new/incoming-webhook/ send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
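On the wire, the helper above reduces to a single JSON POST; a minimal equivalent sketch, with a hypothetical webhook URL standing in for the one Slack generates:

import requests

# Hypothetical webhook URL; Slack issues the real one when the incoming
# webhook is created. The payload is just {"text": ...}, as in send_slack_message.
resp = requests.post(
    "https://hooks.slack.com/services/T0000/B0000/XXXXXXXX",
    json={"text": "Build finished."},
)
resp.raise_for_status()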
97
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { 'nielsr/canine-s': 2048, } # Unicode defines 1,114,112 total “codepoints” _lowerCamelCase = 1114112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _lowerCamelCase = 0 _lowerCamelCase = 0XE0_00 _lowerCamelCase = 0XE0_01 _lowerCamelCase = 0XE0_02 _lowerCamelCase = 0XE0_03 _lowerCamelCase = 0XE0_04 # Maps special codepoints to human-readable names. _lowerCamelCase = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self :str , __A :str=chr(CLS ) , __A :str=chr(SEP ) , __A :Dict=chr(SEP ) , __A :str=chr(CLS ) , __A :Union[str, Any]=chr(PAD ) , __A :str=chr(MASK ) , __A :int=False , __A :int=2048 , **__A :Dict , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behaves like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , model_max_length=__A , **__A , ) # Creates a mapping for looking up the IDs of special symbols. SCREAMING_SNAKE_CASE__ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): SCREAMING_SNAKE_CASE__ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs.
SCREAMING_SNAKE_CASE__ = { codepoint: name for name, codepoint in self._special_codepoints.items() } SCREAMING_SNAKE_CASE__ = UNICODE_VOCAB_SIZE SCREAMING_SNAKE_CASE__ = len(self._special_codepoints ) @property def _snake_case ( self :Optional[Any] ) -> int: """simple docstring""" return self._unicode_vocab_size def _snake_case ( self :Tuple , __A :str ) -> List[str]: """simple docstring""" return list(__A ) def _snake_case ( self :Optional[Any] , __A :str ) -> int: """simple docstring""" try: return ord(__A ) except TypeError: raise ValueError(f'''invalid token: \'{token}\'''' ) def _snake_case ( self :str , __A :int ) -> str: """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(__A ) except TypeError: raise ValueError(f'''invalid id: {index}''' ) def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Any: """simple docstring""" return "".join(__A ) def _snake_case ( self :Optional[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) SCREAMING_SNAKE_CASE__ = [1] + ([0] * len(__A )) + [1] if token_ids_a is not None: result += ([0] * len(__A )) + [1] return result def _snake_case ( self :List[str] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def _snake_case ( self :int , __A :str , __A :Optional[str] = None ) -> Any: """simple docstring""" return ()
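Assuming the class above corresponds to the CANINE tokenizer released as google/canine-s, tokenization is a direct character-to-codepoint mapping with the private-use specials wrapped around it; a minimal sketch:

# Assumption: the obfuscated class above is transformers' CanineTokenizer.
from transformers import CanineTokenizer

tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
ids = tokenizer("hi")["input_ids"]
# [CLS] (0xE000 = 57344) and [SEP] (0xE001 = 57345) wrap ord("h") = 104 and ord("i") = 105.
print(ids)  # expected: [57344, 104, 105, 57345]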
6
0
'''simple docstring''' from __future__ import annotations from typing import TypedDict class BWTTransformDict ( TypedDict ): """simple docstring""" bwt_string : str idx_original_string : int def a__ ( lowercase : str ) -> list[str]: """simple docstring""" if not isinstance(lowercase, lowercase ): raise TypeError('''The parameter s type must be str.''' ) return [s[i:] + s[:i] for i in range(len(lowercase ) )] def a__ ( lowercase : str ) -> BWTTransformDict: """simple docstring""" if not isinstance(lowercase, lowercase ): raise TypeError('''The parameter s type must be str.''' ) if not s: raise ValueError('''The parameter s must not be empty.''' ) _UpperCamelCase = all_rotations(lowercase ) rotations.sort() # sort the list of rotations in alphabetical order # make a string composed of the last char of each rotation _UpperCamelCase = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(lowercase ), } return response def a__ ( lowercase : str, lowercase : int ) -> str: """simple docstring""" if not isinstance(lowercase, lowercase ): raise TypeError('''The parameter bwt_string type must be str.''' ) if not bwt_string: raise ValueError('''The parameter bwt_string must not be empty.''' ) try: _UpperCamelCase = int(lowercase ) except ValueError: raise TypeError( '''The parameter idx_original_string type must be int or a value''' ''' that can be cast to int.''' ) if idx_original_string < 0: raise ValueError('''The parameter idx_original_string must not be lower than 0.''' ) if idx_original_string >= len(lowercase ): raise ValueError( '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' ) _UpperCamelCase = [''''''] * len(lowercase ) for _ in range(len(lowercase ) ): for i in range(len(lowercase ) ): _UpperCamelCase = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowercase__ : List[str] = 'Provide a string that I will generate its BWT transform: ' lowercase__ : Optional[int] = input(entry_msg).strip() lowercase__ : Dict = bwt_transform(s) print( F"""Burrows Wheeler transform for string '{s}' results """ F"""in '{result['bwt_string']}'""" ) lowercase__ : Optional[Any] = reverse_bwt(result['bwt_string'], result['idx_original_string']) print( F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """ F"""we get original string '{original_string}'""" )
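A quick worked example: for "banana" the six rotations sort to [abanan, anaban, ananab, banana, nabana, nanaba], so the last column reads "nnbaaa" and the original string sits at index 3. Exercising the functions above (using the names from the __main__ block):

result = bwt_transform("banana")
print(result)  # {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
print(reverse_bwt(result["bwt_string"], result["idx_original_string"]))  # banana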
98
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) _lowerCamelCase = 'bert-base-cased' _lowerCamelCase = 'fp16' _lowerCamelCase = 'bf16' _lowerCamelCase = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Optional[Any] ) -> Optional[Any]: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def _snake_case ( self :List[Any] ) -> Tuple: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = f'''{i + 1}''' SCREAMING_SNAKE_CASE__ = strategy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def _snake_case ( self :int ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = prefetch_policy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def _snake_case ( self :List[str] ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = state_dict_type with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _snake_case ( self :str ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A ) for policy in FSDP_AUTO_WRAP_POLICY: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = policy if policy == "TRANSFORMER_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """BertLayer""" elif policy == "SIZE_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """2000""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP""" 
SCREAMING_SNAKE_CASE__ = """T5Layer""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() with self.assertRaises(__A ) as cm: fsdp_plugin.set_auto_wrap_policy(__A ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP""" SCREAMING_SNAKE_CASE__ = """0""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _snake_case ( self :Optional[Any] ) -> Optional[int]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = mp_dtype with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = Accelerator() if mp_dtype == "fp16": SCREAMING_SNAKE_CASE__ = torch.floataa elif mp_dtype == "bf16": SCREAMING_SNAKE_CASE__ = torch.bfloataa SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , __A ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__A ) def _snake_case ( self :str ) -> str: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = str(__A ).lower() with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) ) @require_fsdp @require_multi_gpu @slow class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Any ) -> Any: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = 0.8_2 SCREAMING_SNAKE_CASE__ = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] SCREAMING_SNAKE_CASE__ = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def _snake_case ( self :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" ) SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: SCREAMING_SNAKE_CASE__ = cmd.copy() for i, strategy in enumerate(__A ): if strategy.lower() in config: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--performance_lower_bound={self.performance_lower_bound}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = cmd.copy() cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) if strategy != "FULL_SHARD": continue SCREAMING_SNAKE_CASE__ = len(__A ) for state_dict_type in FSDP_STATE_DICT_TYPE: SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index] cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) SCREAMING_SNAKE_CASE__ = cmd_config[:-1] SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ f'''--resume_from_checkpoint={resume_from_checkpoint}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): SCREAMING_SNAKE_CASE__ = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, 
strategy in enumerate(__A ): if strategy.lower() in spec: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--peak_memory_upper_bound={peak_mem_upper_bound}''', f'''--n_train={self.n_train}''', f'''--n_val={self.n_val}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() )
6
0
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem SCREAMING_SNAKE_CASE = importlib.util.find_spec('s3fs') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 SCREAMING_SNAKE_CASE = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def a (lowerCAmelCase__ ): if "://" in dataset_path: __a = dataset_path.split("""://""" )[1] return dataset_path def a (lowerCAmelCase__ ): if fs is not None and fs.protocol != "file": return True else: return False def a (lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): __a = not is_remote_filesystem(lowerCAmelCase__ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(lowerCAmelCase__ ) , fs._strip_protocol(lowerCAmelCase__ ) ) else: fs.mv(lowerCAmelCase__ , lowerCAmelCase__ , recursive=lowerCAmelCase__ ) def a (): if hasattr(fsspec.asyn , """reset_lock""" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: __a = None __a = None __a = threading.Lock()
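The helpers above (their names all flattened to `a` by the renaming) share one pattern: strip the protocol from a URI, check whether the filesystem is remote, and prefer a plain local move over fsspec's copy-and-remove. A self-contained sketch of that pattern, assuming only fsspec is installed:

import shutil

import fsspec

def move_path(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    # LocalFileSystem.mv does copy + rm, so a plain shutil.move is cheaper
    # when the filesystem is local ("file" is LocalFileSystem's protocol).
    if fs.protocol == "file":
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)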
99
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _lowerCamelCase = logging.get_logger(__name__) # General docstring _lowerCamelCase = 'PoolFormerConfig' # Base docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = [1, 512, 7, 7] # Image classification docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = 'tabby, tabby cat' _lowerCamelCase = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ): if drop_prob == 0.0 or not training: return input SCREAMING_SNAKE_CASE__ = 1 - drop_prob SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor return output class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = drop_prob def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor: """simple docstring""" return drop_path(__A , self.drop_prob , self.training ) def _snake_case ( self :Dict ) -> str: """simple docstring""" return "p={}".format(self.drop_prob ) class UpperCamelCase_ ( nn.Module ): def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size) SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride) SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A ) SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity() def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.projection(__A ) SCREAMING_SNAKE_CASE__ = self.norm(__A ) return embeddings class UpperCamelCase_ ( nn.GroupNorm ): def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict: """simple docstring""" super().__init__(1 , __A , **__A ) class UpperCamelCase_ ( nn.Module ): def __init__( self :List[str] , __A :Optional[int] ) -> Any: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A ) def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]: """simple docstring""" return self.pool(__A ) - hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , 
__A :Any ) -> str: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if isinstance(config.hidden_act , __A ): SCREAMING_SNAKE_CASE__ = ACTaFN[config.hidden_act] else: SCREAMING_SNAKE_CASE__ = config.hidden_act def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.act_fn(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) # Useful for training neural nets SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity() SCREAMING_SNAKE_CASE__ = config.use_layer_scale if config.use_layer_scale: SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str: """simple docstring""" if self.use_layer_scale: SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = () SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs else: SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) ) # First residual connection SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states SCREAMING_SNAKE_CASE__ = () # Second residual connection inside the PoolFormerOutput block SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) ) SCREAMING_SNAKE_CASE__ = hidden_states + layer_output SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs class UpperCamelCase_ ( nn.Module ): def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = config # stochastic depth decay rule SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings SCREAMING_SNAKE_CASE__ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) # Transformer blocks SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers 
SCREAMING_SNAKE_CASE__ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__A ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None SCREAMING_SNAKE_CASE__ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers # Get patch embeddings from hidden_states SCREAMING_SNAKE_CASE__ = embedding_layer(__A ) # Send the embeddings through the blocks for _, blk in enumerate(__A ): SCREAMING_SNAKE_CASE__ = blk(__A ) SCREAMING_SNAKE_CASE__ = layer_outputs[0] if output_hidden_states: SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A ) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PoolFormerConfig lowerCamelCase_ = "poolformer" lowerCamelCase_ = "pixel_values" lowerCamelCase_ = True def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict: """simple docstring""" if isinstance(__A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__A , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any: """simple docstring""" if isinstance(__A , __A ): SCREAMING_SNAKE_CASE__ = value _lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
, UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Any ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A ) # Initialize weights and apply final processing self.post_init() def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) SCREAMING_SNAKE_CASE__ = self.encoder( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , ) class UpperCamelCase_ ( nn.Module ): def __init__( self :int , __A :Optional[int] ) -> Tuple: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size ) def _snake_case ( self :List[Any] , __A :Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.dense(__A ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :str , __A :Union[str, Any] ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config.num_labels SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A ) # Final norm SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head SCREAMING_SNAKE_CASE__ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE__ = self.poolformer( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = outputs[0] SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) ) SCREAMING_SNAKE_CASE__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): SCREAMING_SNAKE_CASE__ = 
"""single_label_classification""" else: SCREAMING_SNAKE_CASE__ = """multi_label_classification""" if self.config.problem_type == "regression": SCREAMING_SNAKE_CASE__ = MSELoss() if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) elif self.config.problem_type == "single_label_classification": SCREAMING_SNAKE_CASE__ = CrossEntropyLoss() SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss() SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) if not return_dict: SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
6
0
import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __snake_case : '''simple docstring''' def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = image_size SCREAMING_SNAKE_CASE__ = patch_size SCREAMING_SNAKE_CASE__ = num_channels SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = scope SCREAMING_SNAKE_CASE__ = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size SCREAMING_SNAKE_CASE__ = (self.image_size // 32) ** 2 SCREAMING_SNAKE_CASE__ = num_patches + 1 def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = self.get_config() return config, pixel_values, labels def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [4, 8, 16, 32], '''num_groups''': 2, } return ViTHybridConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , ) def lowercase_ ( self , A_ , A_ , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = ViTHybridModel(config=A_ ) model.to(A_ 
) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self , A_ , A_ , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.type_sequence_label_size SCREAMING_SNAKE_CASE__ = ViTHybridForImageClassification(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = config_and_inputs SCREAMING_SNAKE_CASE__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ : Tuple = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () lowerCamelCase__ : Any = ( {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification} if is_torch_available() else {} ) lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : Optional[Any] = False lowerCamelCase__ : int = False def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = ViTHybridModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 ) def lowercase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def lowercase_ ( self ): '''simple docstring''' pass def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , nn.Linear ) ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ = model_class(A_ ) SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = _config_zero_init(A_ ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ = model_class(config=A_ ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": SCREAMING_SNAKE_CASE__ = [f'''{name}.{key}''' for key in module.state_dict().keys()] break for name, 
param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def lowercase_ ( self ): '''simple docstring''' for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = ViTHybridModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def __snake_case ( ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase_ ( self ): '''simple docstring''' return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( A_ ) SCREAMING_SNAKE_CASE__ = self.default_image_processor SCREAMING_SNAKE_CASE__ = prepare_img() SCREAMING_SNAKE_CASE__ = image_processor(images=A_ , return_tensors='''pt''' ).to(A_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(**A_ ) # verify the logits SCREAMING_SNAKE_CASE__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , A_ ) SCREAMING_SNAKE_CASE__ = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) ) @slow @require_accelerate def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' ) SCREAMING_SNAKE_CASE__ = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' ) SCREAMING_SNAKE_CASE__ = prepare_img() SCREAMING_SNAKE_CASE__ = image_processor(images=A_ , return_tensors='''pt''' ) SCREAMING_SNAKE_CASE__ = model(**A_ ) SCREAMING_SNAKE_CASE__ = outputs.logits # model predicts one of the 1000 ImageNet classes SCREAMING_SNAKE_CASE__ = logits.argmax(-1 ).item() self.assertEqual(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
100
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_lengths SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = gelu_activation SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings SCREAMING_SNAKE_CASE__ = causal SCREAMING_SNAKE_CASE__ = asm SCREAMING_SNAKE_CASE__ = n_langs SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = n_special SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = summary_type SCREAMING_SNAKE_CASE__ = use_proj SCREAMING_SNAKE_CASE__ = scope def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_input_lengths: SCREAMING_SNAKE_CASE__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float() SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) 
SCREAMING_SNAKE_CASE__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self :List[str] ) -> Optional[int]: """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, 
model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_choices SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self :Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): lowerCamelCase_ = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase_ = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def 
_snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 ) def _snake_case ( self :int ) -> int: """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__A ) def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__A ) def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__A ) def _snake_case ( self :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__A ) def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__A ) def _snake_case ( self :Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__A ) def _snake_case ( self :Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__A ) @slow def _snake_case ( self :Union[str, Any] ) -> List[str]: """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @slow @require_torch_gpu def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model_class(config=__A ) SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A ) SCREAMING_SNAKE_CASE__ = torch.jit.trace( __A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) ) SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A ) loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _snake_case ( self :Dict ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(__A )[0] SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __A ) SCREAMING_SNAKE_CASE__ = torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
6
0
import jax.numpy as jnp

from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMTaModel(FlaxTaModel):
    '''simple docstring'''

    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    '''simple docstring'''

    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    '''simple docstring'''

    model_type = "mt5"
    config_class = MTaConfig
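# Quick sanity check of shift_tokens_right (a sketch; the token ids are arbitrary):
# shift_tokens_right(jnp.array([[5, 6, 7]]), pad_token_id=0, decoder_start_token_id=0)
# -> [[0, 5, 6]]: every token moves one slot right and the start token fills slot 0.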
101
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple=True ): model.train() SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[Any]=False ): set_seed(42 ) SCREAMING_SNAKE_CASE__ = RegressionModel() SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) model.to(accelerator.device ) if sched: SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = AdamW(params=ddp_model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) # Make a copy of `model` if sched: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): # Test when on a single CPU or GPU that the context manager does nothing SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ): # Test on distributed setup that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ )) if accelerator.num_processes > 1: check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = 
RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if iteration < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if batch_num < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(UpperCamelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(UpperCamelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
6
0
"""simple docstring""" __magic_name__ : dict[tuple[int, int, int], int] = {} def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # if we are absent twice, or late 3 consecutive days, # no further prize strings are possible if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on UpperCamelCase : Any = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one UpperCamelCase : Dict = _calculate(days - 1 , SCREAMING_SNAKE_CASE , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 UpperCamelCase : int = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter UpperCamelCase : Dict = _calculate(days - 1 , SCREAMING_SNAKE_CASE , 0 ) UpperCamelCase : Optional[int] = state_late + state_absent + state_ontime UpperCamelCase : Any = prizestrings return prizestrings def UpperCamelCase (SCREAMING_SNAKE_CASE = 30 ): return _calculate(SCREAMING_SNAKE_CASE , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
102
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class UpperCamelCase_(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        '''simple docstring'''
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        return ["input_ids", "attention_mask", "pixel_values"]
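# Usage sketch (hedged; "pil_image" and the pretrained image_processor/tokenizer
# pair are placeholders, not part of the row above):
# processor = UpperCamelCase_(image_processor=image_processor, tokenizer=tokenizer)
# batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
# list(batch.keys())  # -> ["input_ids", "attention_mask", "pixel_values"]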
6
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case = logging.get_logger(__name__) snake_case = { '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''', } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): A__ : List[str] = '''switch_transformers''' A__ : List[str] = ['''past_key_values'''] A__ : Any = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : List[Any] , __lowerCamelCase : Dict=3_2_1_2_8 , __lowerCamelCase : Optional[int]=7_6_8 , __lowerCamelCase : List[str]=6_4 , __lowerCamelCase : Optional[int]=2_0_4_8 , __lowerCamelCase : Any=6_4 , __lowerCamelCase : str=1_2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : int=1_2 , __lowerCamelCase : str=3 , __lowerCamelCase : List[str]=1_2 , __lowerCamelCase : int=8 , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[int]=0.0_1 , __lowerCamelCase : int="float32" , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=3_2 , __lowerCamelCase : int=1_2_8 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Dict=1E-6 , __lowerCamelCase : List[str]=0.0_0_1 , __lowerCamelCase : Optional[int]=0.0_0_1 , __lowerCamelCase : Dict=1.0 , __lowerCamelCase : Optional[int]="relu" , __lowerCamelCase : List[str]=True , __lowerCamelCase : int=False , __lowerCamelCase : Any=True , __lowerCamelCase : str=0 , __lowerCamelCase : List[Any]=1 , **__lowerCamelCase : Optional[int] , ): """simple docstring""" _snake_case = vocab_size _snake_case = d_model _snake_case = d_kv _snake_case = d_ff _snake_case = num_sparse_encoder_layers _snake_case = num_layers _snake_case = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _snake_case = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: _snake_case = self.num_layers // self.num_sparse_encoder_layers else: _snake_case = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: _snake_case = self.num_decoder_layers // self.num_sparse_decoder_layers else: _snake_case = self.num_decoder_layers # HACK: this will create 0 sparse layers _snake_case = num_heads _snake_case = num_experts _snake_case = expert_capacity _snake_case = router_bias _snake_case = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" ) _snake_case = router_dtype _snake_case = router_ignore_padding_tokens _snake_case = relative_attention_num_buckets _snake_case = relative_attention_max_distance _snake_case = dropout_rate _snake_case = layer_norm_epsilon _snake_case = initializer_factor _snake_case = feed_forward_proj _snake_case = use_cache _snake_case = add_router_probs _snake_case = router_z_loss_coef _snake_case = router_aux_loss_coef _snake_case = self.feed_forward_proj.split('''-''' ) _snake_case = act_info[-1] _snake_case = act_info[0] == '''gated''' if len(__lowerCamelCase ) > 1 and act_info[0] != "gated" or len(__lowerCamelCase ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": _snake_case = '''gelu_new''' super().__init__( pad_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase , )
103
def find_min(arr):
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
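# A quick check (a sketch): [1, 6, 11, 5] splits into {1, 5, 6} and {11},
# so the smallest possible difference of the two subset sums is 12 - 11 = 1.
assert find_min([1, 6, 11, 5]) == 1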
6
0
"""simple docstring""" from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def _lowerCamelCase ( UpperCAmelCase_ : int ) -> Optional[Any]: """simple docstring""" def is_in_circle(UpperCAmelCase_ : float, UpperCAmelCase_ : float ) -> bool: A__ = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle A__ = mean( int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) ) for _ in range(UpperCAmelCase_ ) ) # The ratio of the area for circle to square is pi/4. A__ = proportion * 4 print(F"""The estimated value of pi is {pi_estimate}""" ) print(F"""The numpy value of pi is {pi}""" ) print(F"""The total error is {abs(pi - pi_estimate )}""" ) def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : Callable[[float], float], UpperCAmelCase_ : float = 0.0, UpperCAmelCase_ : float = 1.0, ) -> float: """simple docstring""" return mean( function_to_integrate(uniform(UpperCAmelCase_, UpperCAmelCase_ ) ) for _ in range(UpperCAmelCase_ ) ) * (max_value - min_value) def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : float = 0.0, UpperCAmelCase_ : float = 1.0 ) -> None: """simple docstring""" def identity_function(UpperCAmelCase_ : float ) -> float: return x A__ = area_under_curve_estimator( UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) A__ = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" ) print(F"""Estimated value is {estimated_value}""" ) print(F"""Expected value is {expected_value}""" ) print(F"""Total error is {abs(estimated_value - expected_value )}""" ) print("******************" ) def _lowerCamelCase ( UpperCAmelCase_ : int ) -> None: """simple docstring""" def function_to_integrate(UpperCAmelCase_ : float ) -> float: return sqrt(4.0 - x * x ) A__ = area_under_curve_estimator( UpperCAmelCase_, UpperCAmelCase_, 0.0, 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(F"""Estimated value is {estimated_value}""" ) print(F"""Expected value is {pi}""" ) print(F"""Total error is {abs(estimated_value - pi )}""" ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
104
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
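# Example: a 10 kg mass moving at 5 m/s carries 0.5 * 10 * 5**2 = 125 J.
assert kinetic_energy(10, 5) == 125.0
# Velocity is squared via abs(), so direction does not matter:
assert kinetic_energy(10, -5) == 125.0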
6
0
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
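# An odd cycle is the classic counterexample (a quick sketch):
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
assert not check_bipartite_dfs(triangle)  # a 3-cycle cannot be 2-colored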
105
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class UpperCamelCase_(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        '''simple docstring'''
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
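# Usage sketch (hedged; BertConfig is only an example of a PretrainedConfig pair):
# from transformers import BertConfig
# config = UpperCamelCase_.from_encoder_decoder_configs(BertConfig(), BertConfig())
# config.decoder.is_decoder, config.decoder.add_cross_attention  # -> (True, True)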
6
0
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    '''simple docstring'''
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    '''simple docstring'''
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
106
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class UpperCamelCase_(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        '''simple docstring'''
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        '''simple docstring'''
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
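# Usage sketch (hedged; a minimal Features object with a ClassLabel column):
# feats = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# task = UpperCamelCase_().align_with_features(feats)
# task.label_schema["labels"].names  # -> ["neg", "pos"]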
6
0
'''simple docstring'''


def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
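# Runtime note: the search does O(len(s) * len(pattern)) character comparisons in
# the worst case, e.g. repeated near-misses like this one (a quick sketch):
assert naive_pattern_search("AAAAAA", "AAB") == []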
107
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = model.config SCREAMING_SNAKE_CASE__ = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) SCREAMING_SNAKE_CASE__ = MBartConfig( is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , ) return encoder_config, decoder_config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ): if "encoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" ) if "decoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if name.startswith("""encoder""" ): if "layers" in name: SCREAMING_SNAKE_CASE__ = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "mask" not in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "encoder.norm.weight": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight""" if name == "encoder.norm.bias": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias""" return name def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ): for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ ) if "qkv" in key: SCREAMING_SNAKE_CASE__ = key.split(""".""" ) SCREAMING_SNAKE_CASE__ = int(key_split[3] ) SCREAMING_SNAKE_CASE__ = int(key_split[5] ) SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE__ = val[:dim, :] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE__ = val[-dim:, :] else: SCREAMING_SNAKE_CASE__ = val[:dim] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2] SCREAMING_SNAKE_CASE__ = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: SCREAMING_SNAKE_CASE__ = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: 
str=False ): # load original model SCREAMING_SNAKE_CASE__ = DonutModel.from_pretrained(UpperCamelCase__ ).eval() # load HuggingFace model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_configs(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = DonutSwinModel(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ ) model.eval() SCREAMING_SNAKE_CASE__ = original_model.state_dict() SCREAMING_SNAKE_CASE__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) # verify results on scanned document SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/example-documents""" ) SCREAMING_SNAKE_CASE__ = dataset["""test"""][0]["""image"""].convert("""RGB""" ) SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) SCREAMING_SNAKE_CASE__ = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": SCREAMING_SNAKE_CASE__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>""" SCREAMING_SNAKE_CASE__ = """When is the coffee break?""" SCREAMING_SNAKE_CASE__ = task_prompt.replace("""{user_input}""" , UpperCamelCase__ ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": SCREAMING_SNAKE_CASE__ = """<s_rvlcdip>""" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: SCREAMING_SNAKE_CASE__ = """<s_cord>""" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": SCREAMING_SNAKE_CASE__ = """s_cord-v2>""" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": SCREAMING_SNAKE_CASE__ = """<s_zhtrainticket>""" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt SCREAMING_SNAKE_CASE__ = """hello world""" else: raise ValueError("""Model name not supported""" ) SCREAMING_SNAKE_CASE__ = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[ """input_ids""" ] SCREAMING_SNAKE_CASE__ = original_model.encoder.model.patch_embed(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.encoder.embeddings(UpperCamelCase__ ) assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) # verify encoder hidden states SCREAMING_SNAKE_CASE__ = original_model.encoder(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = model.encoder(UpperCamelCase__ ).last_hidden_state assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 ) # verify decoder hidden states SCREAMING_SNAKE_CASE__ = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update 
model""" ) processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='naver-clova-ix/donut-base-finetuned-docvqa', required=False, type=str, help='Name of the original model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, required=False, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub.', ) _lowerCamelCase = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
6
0
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded: str = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles: list[str] = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data: str = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
108
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class UpperCamelCase_(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        '''simple docstring'''
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        '''simple docstring'''
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        '''simple docstring'''
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging a = logging.get_logger(__name__) a = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class __a ( _snake_case ): __UpperCamelCase : List[str] = 'bloom' __UpperCamelCase : Optional[Any] = ['past_key_values'] __UpperCamelCase : Optional[int] = { 'num_hidden_layers': 'n_layer', 'num_attention_heads': 'n_head', } def __init__( self : Union[str, Any] ,lowerCamelCase : str=25_0880 ,lowerCamelCase : List[Any]=64 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : Tuple=8 ,lowerCamelCase : Union[str, Any]=1E-5 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : str=True ,lowerCamelCase : List[Any]=1 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Optional[int]=0.0 ,lowerCamelCase : List[Any]=0.0 ,lowerCamelCase : Optional[Any]=1 ,lowerCamelCase : Tuple=False ,**lowerCamelCase : str ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = vocab_size # Backward compatibility with n_embed kwarg __SCREAMING_SNAKE_CASE = kwargs.pop("""n_embed""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = hidden_size if n_embed is None else n_embed __SCREAMING_SNAKE_CASE = n_layer __SCREAMING_SNAKE_CASE = n_head __SCREAMING_SNAKE_CASE = layer_norm_epsilon __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = pretraining_tp __SCREAMING_SNAKE_CASE = apply_residual_connection_post_layernorm __SCREAMING_SNAKE_CASE = hidden_dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = bos_token_id __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = slow_but_exact super().__init__(bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase ) class __a ( _snake_case ): __UpperCamelCase : int = version.parse('1.12' ) def __init__( self : Optional[int] ,lowerCamelCase : PretrainedConfig ,lowerCamelCase : str = "default" ,lowerCamelCase : List[PatchingSpec] = None ,lowerCamelCase : bool = False ,): '''simple docstring''' super().__init__(lowerCamelCase ,task=lowerCamelCase ,patching_specs=lowerCamelCase ,use_past=lowerCamelCase ) if not getattr(self._config ,"""pad_token_id""" ,lowerCamelCase ): # TODO: how to do that better? __SCREAMING_SNAKE_CASE = 0 @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(lowerCamelCase ,direction="""inputs""" ,inverted_values_shape=lowerCamelCase ) __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_sequence + sequence"""} else: __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' return self._config.n_layer @property def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return self._config.n_head @property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return 1E-3 def UpperCAmelCase__ ( self : str ,lowerCamelCase : "PreTrainedTokenizer" ,lowerCamelCase : int = -1 ,lowerCamelCase : int = -1 ,lowerCamelCase : bool = False ,lowerCamelCase : Optional["TensorType"] = None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = super(lowerCamelCase ,self ).generate_dummy_inputs( lowerCamelCase ,batch_size=lowerCamelCase ,seq_length=lowerCamelCase ,is_pair=lowerCamelCase ,framework=lowerCamelCase ) # We need to order the input in the way they appears in the forward() __SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __SCREAMING_SNAKE_CASE = seqlen + 2 __SCREAMING_SNAKE_CASE = self._config.hidden_size // self.num_attention_heads __SCREAMING_SNAKE_CASE = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) __SCREAMING_SNAKE_CASE = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) __SCREAMING_SNAKE_CASE = [ (torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(self.num_layers ) ] __SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""] if self.use_past: __SCREAMING_SNAKE_CASE = ordered_inputs["""attention_mask"""].dtype __SCREAMING_SNAKE_CASE = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(lowerCamelCase ,lowerCamelCase ,dtype=lowerCamelCase )] ,dim=1 ) return ordered_inputs @property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return 13
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler Problem 3) by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i  # i is prime here: all smaller factors were already divided out
            n //= i
        i += 1
    if n > 1:  # whatever remains is a prime factor larger than sqrt of the original n
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
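# Sanity-check sketch: a couple of hand-verifiable cases for the trial-division
# factorizer. 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29, and a
# prime is its own largest prime factor.
if __name__ == "__main__":
    assert solution(13195) == 29
    assert solution(17) == 17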
"""simple docstring""" # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES UpperCamelCase__ = 'tiny-wmt19-en-ru' # Build # borrowed from a test UpperCamelCase__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] UpperCamelCase__ = dict(zip(vocab, range(len(vocab)))) UpperCamelCase__ = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ = Path(tmpdirname) UpperCamelCase__ = build_dir / VOCAB_FILES_NAMES['src_vocab_file'] UpperCamelCase__ = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file'] UpperCamelCase__ = build_dir / VOCAB_FILES_NAMES['merges_file'] with open(src_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, 'w') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, 'w') as fp: fp.write('\n'.join(merges)) UpperCamelCase__ = FSMTTokenizer( langs=['en', 'ru'], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) UpperCamelCase__ = FSMTConfig( langs=['ru', 'en'], src_vocab_size=10_00, tgt_vocab_size=10_00, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) UpperCamelCase__ = FSMTForConditionalGeneration(config) print(f'num of params {tiny_model.num_parameters()}') # Test UpperCamelCase__ = tokenizer(['Making tiny model'], return_tensors='pt') UpperCamelCase__ = tiny_model(**batch) print('test output:', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f'Generated {mname_tiny}') # Upload # transformers-cli upload tiny-wmt19-en-ru
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class UpperCamelCase_ ( unittest.TestCase ): def _snake_case ( self :Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__A ) ) def _snake_case ( self :List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__A ) ) def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", """unet/diffusion_pytorch_model.bin""", # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__A ) ) def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] self.assertTrue(is_safetensors_compatible(__A ) ) def _snake_case ( self :Optional[Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.bin""", """safety_checker/model.safetensors""", """vae/diffusion_pytorch_model.bin""", """vae/diffusion_pytorch_model.safetensors""", """text_encoder/pytorch_model.bin""", # Removed: 'text_encoder/model.safetensors', """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] self.assertFalse(is_safetensors_compatible(__A ) ) def _snake_case ( self :Tuple ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :str ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """unet/diffusion_pytorch_model.bin""", """unet/diffusion_pytorch_model.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :List[Any] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", 
"""text_encoder/model.fp16.safetensors""", """unet/diffusion_pytorch_model.fp16.bin""", # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertFalse(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :str ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """text_encoder/pytorch_model.fp16.bin""", """text_encoder/model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """text_encoder/pytorch_model.bin""", """text_encoder/model.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertTrue(is_safetensors_compatible(__A , variant=__A ) ) def _snake_case ( self :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [ """safety_checker/pytorch_model.fp16.bin""", """safety_checker/model.fp16.safetensors""", """vae/diffusion_pytorch_model.fp16.bin""", """vae/diffusion_pytorch_model.fp16.safetensors""", """text_encoder/pytorch_model.fp16.bin""", # 'text_encoder/model.fp16.safetensors', """unet/diffusion_pytorch_model.fp16.bin""", """unet/diffusion_pytorch_model.fp16.safetensors""", ] SCREAMING_SNAKE_CASE__ = """fp16""" self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy,
    computed with Zeller's congruence and cross-checked against datetime."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math: January and February count as months 13 and 14 of the previous year
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    return f"Your date {date_input}, is a {days[str(f)]}!"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    print(zeller(args.date_input))
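# Cross-check sketch (the probe dates below are arbitrary examples): zeller()
# already validates its own result against datetime.date internally, so a clean
# return is itself the check.
def _zeller_selfcheck() -> None:
    for probe in ("01-31-2010", "11/09/1989", "07-04-1776"):
        print(zeller(probe))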
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def __magic_name__ ( _lowerCamelCase : Tuple=None ): if subparsers is not None: __a : str = subparsers.add_parser("""test""" ) else: __a : int = argparse.ArgumentParser("""Accelerate test command""" ) parser.add_argument( """--config_file""" , default=UpperCamelCase__ , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , ) if subparsers is not None: parser.set_defaults(func=UpperCamelCase__ ) return parser def __magic_name__ ( _lowerCamelCase : Optional[Any] ): __a : Optional[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] ) if args.config_file is None: __a : List[str] = script_name else: __a : Union[str, Any] = F'''--config_file={args.config_file} {script_name}''' __a : Dict = ["""accelerate-launch"""] + test_args.split() __a : Tuple = execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() ) if result.returncode == 0: print("""Test is a success! You are ready for your distributed training!""" ) def __magic_name__ ( ): __a : Optional[Any] = test_command_parser() __a : Any = parser.parse_args() test_command(UpperCamelCase__ ) if __name__ == "__main__": main()
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
import math from enum import Enum from typing import Optional, Union from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from .utils import logging lowerCamelCase__ = logging.get_logger(__name__) class _lowerCAmelCase ( UpperCamelCase__ ): """simple docstring""" lowerCAmelCase__ ='''linear''' lowerCAmelCase__ ='''cosine''' lowerCAmelCase__ ='''cosine_with_restarts''' lowerCAmelCase__ ='''polynomial''' lowerCAmelCase__ ='''constant''' lowerCAmelCase__ ='''constant_with_warmup''' lowerCAmelCase__ ='''piecewise_constant''' def lowercase_ ( SCREAMING_SNAKE_CASE : Optimizer , SCREAMING_SNAKE_CASE : int = -1 ): """simple docstring""" return LambdaLR(UpperCamelCase__ , lambda SCREAMING_SNAKE_CASE : 1 , last_epoch=UpperCamelCase__ ) def lowercase_ ( SCREAMING_SNAKE_CASE : Optimizer , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int = -1 ): """simple docstring""" def lr_lambda(SCREAMING_SNAKE_CASE : int ): if current_step < num_warmup_steps: return float(UpperCamelCase__ ) / float(max(1.0 , UpperCamelCase__ ) ) return 1.0 return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , last_epoch=UpperCamelCase__ ) def lowercase_ ( SCREAMING_SNAKE_CASE : Optimizer , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = -1 ): """simple docstring""" snake_case__ : Tuple ={} snake_case__ : int =step_rules.split(''',''' ) for rule_str in rule_list[:-1]: snake_case__, snake_case__ : List[str] =rule_str.split(''':''' ) snake_case__ : List[str] =int(UpperCamelCase__ ) snake_case__ : str =float(UpperCamelCase__ ) snake_case__ : Union[str, Any] =value snake_case__ : List[Any] =float(rule_list[-1] ) def create_rules_function(SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ): def rule_func(SCREAMING_SNAKE_CASE : int ) -> float: snake_case__ : Union[str, Any] =sorted(rules_dict.keys() ) for i, sorted_step in enumerate(UpperCamelCase__ ): if steps < sorted_step: return rules_dict[sorted_steps[i]] return last_lr_multiple return rule_func snake_case__ : Optional[Any] =create_rules_function(UpperCamelCase__ , UpperCamelCase__ ) return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , last_epoch=UpperCamelCase__ ) def lowercase_ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str]=-1 ): """simple docstring""" def lr_lambda(SCREAMING_SNAKE_CASE : int ): if current_step < num_warmup_steps: return float(UpperCamelCase__ ) / float(max(1 , UpperCamelCase__ ) ) return max( 0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) ) return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowercase_ ( SCREAMING_SNAKE_CASE : Optimizer , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float = 0.5 , SCREAMING_SNAKE_CASE : int = -1 ): """simple docstring""" def lr_lambda(SCREAMING_SNAKE_CASE : Any ): if current_step < num_warmup_steps: return float(UpperCamelCase__ ) / float(max(1 , UpperCamelCase__ ) ) snake_case__ : str =float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(UpperCamelCase__ ) * 2.0 * progress )) ) return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowercase_ ( SCREAMING_SNAKE_CASE : Optimizer , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = -1 ): """simple docstring""" def 
lr_lambda(SCREAMING_SNAKE_CASE : Optional[Any] ): if current_step < num_warmup_steps: return float(UpperCamelCase__ ) / float(max(1 , UpperCamelCase__ ) ) snake_case__ : int =float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) ) if progress >= 1.0: return 0.0 return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(UpperCamelCase__ ) * progress) % 1.0) )) ) return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def lowercase_ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=1E-7 , SCREAMING_SNAKE_CASE : int=1.0 , SCREAMING_SNAKE_CASE : Optional[int]=-1 ): """simple docstring""" snake_case__ : Any =optimizer.defaults['''lr'''] if not (lr_init > lr_end): raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' ) def lr_lambda(SCREAMING_SNAKE_CASE : int ): if current_step < num_warmup_steps: return float(UpperCamelCase__ ) / float(max(1 , UpperCamelCase__ ) ) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: snake_case__ : int =lr_init - lr_end snake_case__ : Any =num_training_steps - num_warmup_steps snake_case__ : Optional[int] =1 - (current_step - num_warmup_steps) / decay_steps snake_case__ : int =lr_range * pct_remaining**power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase__ = { SchedulerType.LINEAR: get_linear_schedule_with_warmup, SchedulerType.COSINE: get_cosine_schedule_with_warmup, SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup, SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup, SchedulerType.CONSTANT: get_constant_schedule, SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup, SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule, } def lowercase_ ( SCREAMING_SNAKE_CASE : Union[str, SchedulerType] , SCREAMING_SNAKE_CASE : Optimizer , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : float = 1.0 , SCREAMING_SNAKE_CASE : int = -1 , ): """simple docstring""" snake_case__ : int =SchedulerType(UpperCamelCase__ ) snake_case__ : Optional[Any] =TYPE_TO_SCHEDULER_FUNCTION[name] if name == SchedulerType.CONSTANT: return schedule_func(UpperCamelCase__ , last_epoch=UpperCamelCase__ ) if name == SchedulerType.PIECEWISE_CONSTANT: return schedule_func(UpperCamelCase__ , step_rules=UpperCamelCase__ , last_epoch=UpperCamelCase__ ) # All other schedulers require `num_warmup_steps` if num_warmup_steps is None: raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' ) if name == SchedulerType.CONSTANT_WITH_WARMUP: return schedule_func(UpperCamelCase__ , num_warmup_steps=UpperCamelCase__ , last_epoch=UpperCamelCase__ ) # All other schedulers require `num_training_steps` if num_training_steps is None: raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' ) if name == SchedulerType.COSINE_WITH_RESTARTS: return schedule_func( UpperCamelCase__ , num_warmup_steps=UpperCamelCase__ , num_training_steps=UpperCamelCase__ , num_cycles=UpperCamelCase__ , last_epoch=UpperCamelCase__ , ) if name == SchedulerType.POLYNOMIAL: return schedule_func( UpperCamelCase__ , 
num_warmup_steps=UpperCamelCase__ , num_training_steps=UpperCamelCase__ , power=UpperCamelCase__ , last_epoch=UpperCamelCase__ , ) return schedule_func( UpperCamelCase__ , num_warmup_steps=UpperCamelCase__ , num_training_steps=UpperCamelCase__ , last_epoch=UpperCamelCase__ )
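# Usage sketch for the scheduler factory defined above. In the released
# `diffusers` package the same factory is exposed as
# `diffusers.optimization.get_scheduler`; the toy model and step counts below
# are made up for illustration.
#
#   import torch
#   from diffusers.optimization import get_scheduler
#
#   model = torch.nn.Linear(4, 4)
#   optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#   lr_scheduler = get_scheduler(
#       "cosine", optimizer=optimizer, num_warmup_steps=100, num_training_steps=1000
#   )
#   for _ in range(1000):
#       optimizer.step()
#       lr_scheduler.step()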
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
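# Effect of the lazy-module pattern above: importing the package is cheap because
# each submodule is loaded on first attribute access rather than at import time.
# For example (module path assumed to match the transformers layout):
#
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderModel
#
# only imports the torch-backed modeling file at that point.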
"""simple docstring""" import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def a ( __snake_case : str ): '''simple docstring''' UpperCAmelCase_ :List[Any] = model.config UpperCAmelCase_ :str = DonutSwinConfig( image_size=original_config.input_size, patch_size=4, depths=original_config.encoder_layer, num_heads=[4, 8, 16, 32], window_size=original_config.window_size, embed_dim=128, ) UpperCAmelCase_ :Optional[int] = MBartConfig( is_decoder=UpperCamelCase__, is_encoder_decoder=UpperCamelCase__, add_cross_attention=UpperCamelCase__, decoder_layers=original_config.decoder_layer, max_position_embeddings=original_config.max_position_embeddings, vocab_size=len( model.decoder.tokenizer ), scale_embedding=UpperCamelCase__, add_final_layer_norm=UpperCamelCase__, ) return encoder_config, decoder_config def a ( __snake_case : List[str] ): '''simple docstring''' if "encoder.model" in name: UpperCAmelCase_ :Dict = name.replace('''encoder.model''', '''encoder''' ) if "decoder.model" in name: UpperCAmelCase_ :Any = name.replace('''decoder.model''', '''decoder''' ) if "patch_embed.proj" in name: UpperCAmelCase_ :Tuple = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: UpperCAmelCase_ :int = name.replace('''patch_embed.norm''', '''embeddings.norm''' ) if name.startswith('''encoder''' ): if "layers" in name: UpperCAmelCase_ :Any = '''encoder.''' + name if "attn.proj" in name: UpperCAmelCase_ :List[str] = name.replace('''attn.proj''', '''attention.output.dense''' ) if "attn" in name and "mask" not in name: UpperCAmelCase_ :int = name.replace('''attn''', '''attention.self''' ) if "norm1" in name: UpperCAmelCase_ :int = name.replace('''norm1''', '''layernorm_before''' ) if "norm2" in name: UpperCAmelCase_ :str = name.replace('''norm2''', '''layernorm_after''' ) if "mlp.fc1" in name: UpperCAmelCase_ :int = name.replace('''mlp.fc1''', '''intermediate.dense''' ) if "mlp.fc2" in name: UpperCAmelCase_ :List[Any] = name.replace('''mlp.fc2''', '''output.dense''' ) if name == "encoder.norm.weight": UpperCAmelCase_ :int = '''encoder.layernorm.weight''' if name == "encoder.norm.bias": UpperCAmelCase_ :List[str] = '''encoder.layernorm.bias''' return name def a ( __snake_case : str, __snake_case : Optional[int] ): '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase_ :Tuple = orig_state_dict.pop(UpperCamelCase__ ) if "qkv" in key: UpperCAmelCase_ :List[Any] = key.split('''.''' ) UpperCAmelCase_ :int = int(key_split[3] ) UpperCAmelCase_ :List[Any] = int(key_split[5] ) UpperCAmelCase_ :Dict = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCAmelCase_ :Union[str, Any] = val[:dim, :] UpperCAmelCase_ :Optional[Any] = val[dim : dim * 2, :] UpperCAmelCase_ :Any = val[-dim:, :] else: UpperCAmelCase_ :Optional[Any] = val[:dim] UpperCAmelCase_ :Union[str, Any] = val[dim : dim * 2] UpperCAmelCase_ :str = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: UpperCAmelCase_ :Union[str, Any] = val return orig_state_dict def a ( __snake_case : Union[str, Any], __snake_case : int=None, 
__snake_case : str=False ): '''simple docstring''' UpperCAmelCase_ :Tuple = DonutModel.from_pretrained(UpperCamelCase__ ).eval() # load HuggingFace model UpperCAmelCase_ ,UpperCAmelCase_ :Union[str, Any] = get_configs(UpperCamelCase__ ) UpperCAmelCase_ :Dict = DonutSwinModel(UpperCamelCase__ ) UpperCAmelCase_ :Any = MBartForCausalLM(UpperCamelCase__ ) UpperCAmelCase_ :Optional[Any] = VisionEncoderDecoderModel(encoder=UpperCamelCase__, decoder=UpperCamelCase__ ) model.eval() UpperCAmelCase_ :int = original_model.state_dict() UpperCAmelCase_ :List[Any] = convert_state_dict(UpperCamelCase__, UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) # verify results on scanned document UpperCAmelCase_ :Any = load_dataset('''hf-internal-testing/example-documents''' ) UpperCAmelCase_ :List[str] = dataset['''test'''][0]['''image'''].convert('''RGB''' ) UpperCAmelCase_ :Optional[int] = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__, from_slow=UpperCamelCase__ ) UpperCAmelCase_ :Optional[Any] = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1] ) UpperCAmelCase_ :List[Any] = DonutProcessor(UpperCamelCase__, UpperCamelCase__ ) UpperCAmelCase_ :int = processor(UpperCamelCase__, return_tensors='''pt''' ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": UpperCAmelCase_ :str = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' UpperCAmelCase_ :Union[str, Any] = '''When is the coffee break?''' UpperCAmelCase_ :List[Any] = task_prompt.replace('''{user_input}''', UpperCamelCase__ ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": UpperCAmelCase_ :Union[str, Any] = '''<s_rvlcdip>''' elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: UpperCAmelCase_ :int = '''<s_cord>''' elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": UpperCAmelCase_ :Dict = '''s_cord-v2>''' elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": UpperCAmelCase_ :Optional[int] = '''<s_zhtrainticket>''' elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt UpperCAmelCase_ :Optional[Any] = '''hello world''' else: raise ValueError('''Model name not supported''' ) UpperCAmelCase_ :Optional[Any] = original_model.decoder.tokenizer(UpperCamelCase__, add_special_tokens=UpperCamelCase__, return_tensors='''pt''' )[ '''input_ids''' ] UpperCAmelCase_ :int = original_model.encoder.model.patch_embed(UpperCamelCase__ ) UpperCAmelCase_ ,UpperCAmelCase_ :Any = model.encoder.embeddings(UpperCamelCase__ ) assert torch.allclose(UpperCamelCase__, UpperCamelCase__, atol=1E-3 ) # verify encoder hidden states UpperCAmelCase_ :Any = original_model.encoder(UpperCamelCase__ ) UpperCAmelCase_ :Optional[int] = model.encoder(UpperCamelCase__ ).last_hidden_state assert torch.allclose(UpperCamelCase__, UpperCamelCase__, atol=1E-2 ) # verify decoder hidden states UpperCAmelCase_ :Tuple = original_model(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ).logits UpperCAmelCase_ :Union[str, Any] = model(UpperCamelCase__, decoder_input_ids=UpperCamelCase__ ).logits assert torch.allclose(UpperCamelCase__, UpperCamelCase__, atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCamelCase__ ) processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: 
model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1], commit_message='''Update model''' ) processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1], commit_message='''Update model''' ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="naver-clova-ix/donut-base-finetuned-docvqa", required=False, type=str, help="Name of the original model you\'d like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, required=False, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub.", ) __lowerCamelCase = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = ["image_processor", "tokenizer"] lowerCamelCase_ = "OwlViTImageProcessor" lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __A , ) SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" ) SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__A , __A ) def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple: """simple docstring""" if text is None and query_images is None and images is None: raise ValueError( """You have to specify at least one text or query image or image. All three cannot be none.""" ) if text is not None: if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )): SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )] elif isinstance(__A , __A ) and isinstance(text[0] , __A ): SCREAMING_SNAKE_CASE__ = [] # Maximum number of queries across batch SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__A ) != max_num_queries: SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A )) SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A ) encodings.append(__A ) else: raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" ) if return_tensors == "np": SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 ) SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 ) SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 ) else: raise ValueError("""Target return tensor type could not be returned""" ) SCREAMING_SNAKE_CASE__ = BatchEncoding() SCREAMING_SNAKE_CASE__ = input_ids SCREAMING_SNAKE_CASE__ = attention_mask if 
query_images is not None: SCREAMING_SNAKE_CASE__ = BatchEncoding() SCREAMING_SNAKE_CASE__ = self.image_processor( __A , return_tensors=__A , **__A ).pixel_values SCREAMING_SNAKE_CASE__ = query_pixel_values if images is not None: SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif query_images is not None and images is not None: SCREAMING_SNAKE_CASE__ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__A ) , tensor_type=__A ) def _snake_case ( self :List[Any] , *__A :Dict , **__A :Dict ) -> Optional[int]: """simple docstring""" return self.image_processor.post_process(*__A , **__A ) def _snake_case ( self :Optional[int] , *__A :Dict , **__A :List[str] ) -> Optional[Any]: """simple docstring""" return self.image_processor.post_process_object_detection(*__A , **__A ) def _snake_case ( self :str , *__A :List[str] , **__A :Union[str, Any] ) -> Any: """simple docstring""" return self.image_processor.post_process_image_guided_detection(*__A , **__A ) def _snake_case ( self :Dict , *__A :List[str] , **__A :List[str] ) -> int: """simple docstring""" return self.tokenizer.batch_decode(*__A , **__A ) def _snake_case ( self :Dict , *__A :Dict , **__A :List[str] ) -> str: """simple docstring""" return self.tokenizer.decode(*__A , **__A ) @property def _snake_case ( self :List[Any] ) -> Optional[int]: """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , ) return self.image_processor_class @property def _snake_case ( self :Any ) -> Optional[Any]: """simple docstring""" warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , ) return self.image_processor
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ): """simple docstring""" __snake_case : List[str] = 42 class SCREAMING_SNAKE_CASE_ ( nn.Module ): """simple docstring""" def __init__( self :Any , __lowercase :str=3 , __lowercase :List[Any]=3 , __lowercase :str=("DownEncoderBlock2D",) , __lowercase :Optional[int]=(64,) , __lowercase :str=2 , __lowercase :Optional[Any]=32 , __lowercase :int="silu" , __lowercase :List[Any]=True , ): super().__init__() __lowerCamelCase : Optional[int] =layers_per_block __lowerCamelCase : Dict =torch.nn.Convad( __A , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) __lowerCamelCase : str =None __lowerCamelCase : Tuple =nn.ModuleList([] ) # down __lowerCamelCase : Optional[int] =block_out_channels[0] for i, down_block_type in enumerate(__A ): __lowerCamelCase : int =output_channel __lowerCamelCase : Union[str, Any] =block_out_channels[i] __lowerCamelCase : List[str] =i == len(__A ) - 1 __lowerCamelCase : int =get_down_block( __A , num_layers=self.layers_per_block , in_channels=__A , out_channels=__A , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=__A , resnet_groups=__A , attention_head_dim=__A , temb_channels=__A , ) self.down_blocks.append(__A ) # mid __lowerCamelCase : Union[str, Any] =UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__A , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=__A , temb_channels=__A , ) # out __lowerCamelCase : Tuple =nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=__A , eps=1e-6 ) __lowerCamelCase : Tuple =nn.SiLU() __lowerCamelCase : List[Any] =2 * out_channels if double_z else out_channels __lowerCamelCase : Tuple =nn.Convad(block_out_channels[-1] , __A , 3 , padding=1 ) __lowerCamelCase : Optional[Any] =False def __lowercase ( self :Optional[int] , __lowercase :Tuple ): __lowerCamelCase : List[Any] =x __lowerCamelCase : Dict =self.conv_in(__A ) if self.training and self.gradient_checkpointing: def create_custom_forward(__lowercase :Tuple ): def custom_forward(*__lowercase :List[Any] ): return module(*__A ) return custom_forward # down if is_torch_version('''>=''' , '''1.11.0''' ): for down_block in self.down_blocks: __lowerCamelCase : Optional[Any] =torch.utils.checkpoint.checkpoint( create_custom_forward(__A ) , __A , use_reentrant=__A ) # middle __lowerCamelCase : Tuple =torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __A , use_reentrant=__A ) else: for down_block in self.down_blocks: __lowerCamelCase : Any =torch.utils.checkpoint.checkpoint(create_custom_forward(__A ) , __A ) # middle __lowerCamelCase : Optional[int] =torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , __A ) else: # down for down_block in self.down_blocks: __lowerCamelCase : Any =down_block(__A ) # middle __lowerCamelCase : List[Any] =self.mid_block(__A ) # post-process __lowerCamelCase : Union[str, Any] =self.conv_norm_out(__A ) __lowerCamelCase : List[str] =self.conv_act(__A ) __lowerCamelCase : int =self.conv_out(__A ) return sample class SCREAMING_SNAKE_CASE_ ( nn.Module ): """simple docstring""" 
def __init__( self :Any , __lowercase :List[Any]=3 , __lowercase :str=3 , __lowercase :Tuple=("UpDecoderBlock2D",) , __lowercase :Tuple=(64,) , __lowercase :Any=2 , __lowercase :int=32 , __lowercase :Optional[int]="silu" , __lowercase :Optional[Any]="group" , ): super().__init__() __lowerCamelCase : Union[str, Any] =layers_per_block __lowerCamelCase : Tuple =nn.Convad( __A , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) __lowerCamelCase : Any =None __lowerCamelCase : List[str] =nn.ModuleList([] ) __lowerCamelCase : Optional[int] =in_channels if norm_type == '''spatial''' else None # mid __lowerCamelCase : List[Any] =UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=__A , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=__A , temb_channels=__A , ) # up __lowerCamelCase : Optional[Any] =list(reversed(__A ) ) __lowerCamelCase : List[str] =reversed_block_out_channels[0] for i, up_block_type in enumerate(__A ): __lowerCamelCase : Any =output_channel __lowerCamelCase : List[str] =reversed_block_out_channels[i] __lowerCamelCase : Optional[int] =i == len(__A ) - 1 __lowerCamelCase : Tuple =get_up_block( __A , num_layers=self.layers_per_block + 1 , in_channels=__A , out_channels=__A , prev_output_channel=__A , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=__A , resnet_groups=__A , attention_head_dim=__A , temb_channels=__A , resnet_time_scale_shift=__A , ) self.up_blocks.append(__A ) __lowerCamelCase : str =output_channel # out if norm_type == "spatial": __lowerCamelCase : Optional[Any] =SpatialNorm(block_out_channels[0] , __A ) else: __lowerCamelCase : Optional[int] =nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=__A , eps=1e-6 ) __lowerCamelCase : int =nn.SiLU() __lowerCamelCase : List[str] =nn.Convad(block_out_channels[0] , __A , 3 , padding=1 ) __lowerCamelCase : List[Any] =False def __lowercase ( self :str , __lowercase :Tuple , __lowercase :int=None ): __lowerCamelCase : Optional[Any] =z __lowerCamelCase : Dict =self.conv_in(__A ) __lowerCamelCase : Union[str, Any] =next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(__lowercase :Tuple ): def custom_forward(*__lowercase :List[Any] ): return module(*__A ) return custom_forward if is_torch_version('''>=''' , '''1.11.0''' ): # middle __lowerCamelCase : Optional[int] =torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __A , __A , use_reentrant=__A ) __lowerCamelCase : List[str] =sample.to(__A ) # up for up_block in self.up_blocks: __lowerCamelCase : int =torch.utils.checkpoint.checkpoint( create_custom_forward(__A ) , __A , __A , use_reentrant=__A ) else: # middle __lowerCamelCase : Any =torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , __A , __A ) __lowerCamelCase : Any =sample.to(__A ) # up for up_block in self.up_blocks: __lowerCamelCase : Any =torch.utils.checkpoint.checkpoint(create_custom_forward(__A ) , __A , __A ) else: # middle __lowerCamelCase : List[Any] =self.mid_block(__A , __A ) __lowerCamelCase : str =sample.to(__A ) # up for up_block in self.up_blocks: __lowerCamelCase : Dict =up_block(__A , __A ) # post-process if latent_embeds is None: __lowerCamelCase : Dict =self.conv_norm_out(__A ) else: __lowerCamelCase : str =self.conv_norm_out(__A , __A ) __lowerCamelCase : Dict =self.conv_act(__A ) __lowerCamelCase : 
List[Any] =self.conv_out(__A ) return sample class SCREAMING_SNAKE_CASE_ ( nn.Module ): """simple docstring""" def __init__( self :Optional[int] , __lowercase :Optional[Any] , __lowercase :List[Any] , __lowercase :str , __lowercase :List[str]=None , __lowercase :Optional[Any]="random" , __lowercase :Optional[int]=False , __lowercase :int=True ): super().__init__() __lowerCamelCase : int =n_e __lowerCamelCase : List[Any] =vq_embed_dim __lowerCamelCase : Tuple =beta __lowerCamelCase : Dict =legacy __lowerCamelCase : Optional[int] =nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) __lowerCamelCase : Dict =remap if self.remap is not None: self.register_buffer('''used''' , torch.tensor(np.load(self.remap ) ) ) __lowerCamelCase : str =self.used.shape[0] __lowerCamelCase : str =unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __lowerCamelCase : List[Any] =self.re_embed __lowerCamelCase : Optional[int] =self.re_embed + 1 print( f'Remapping {self.n_e} indices to {self.re_embed} indices. ' f'Using {self.unknown_index} for unknown indices.' ) else: __lowerCamelCase : int =n_e __lowerCamelCase : Any =sane_index_shape def __lowercase ( self :Tuple , __lowercase :List[Any] ): __lowerCamelCase : str =inds.shape assert len(__A ) > 1 __lowerCamelCase : Optional[int] =inds.reshape(ishape[0] , -1 ) __lowerCamelCase : Any =self.used.to(__A ) __lowerCamelCase : Union[str, Any] =(inds[:, :, None] == used[None, None, ...]).long() __lowerCamelCase : Optional[int] =match.argmax(-1 ) __lowerCamelCase : Union[str, Any] =match.sum(2 ) < 1 if self.unknown_index == "random": __lowerCamelCase : Any =torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: __lowerCamelCase : Optional[Any] =self.unknown_index return new.reshape(__A ) def __lowercase ( self :Dict , __lowercase :Optional[Any] ): __lowerCamelCase : Tuple =inds.shape assert len(__A ) > 1 __lowerCamelCase : str =inds.reshape(ishape[0] , -1 ) __lowerCamelCase : List[Any] =self.used.to(__A ) if self.re_embed > self.used.shape[0]: # extra token __lowerCamelCase : Optional[Any] =0 # simply set to zero __lowerCamelCase : Dict =torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , __A ) return back.reshape(__A ) def __lowercase ( self :Optional[int] , __lowercase :List[str] ): __lowerCamelCase : List[Any] =z.permute(0 , 2 , 3 , 1 ).contiguous() __lowerCamelCase : List[Any] =z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __lowerCamelCase : str =torch.argmin(torch.cdist(__A , self.embedding.weight ) , dim=1 ) __lowerCamelCase : Dict =self.embedding(__A ).view(z.shape ) __lowerCamelCase : Optional[Any] =None __lowerCamelCase : Union[str, Any] =None # compute loss for embedding if not self.legacy: __lowerCamelCase : Optional[Any] =self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __lowerCamelCase : Tuple =torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients __lowerCamelCase : str =z + (z_q - z).detach() # reshape back to match original input shape __lowerCamelCase : Optional[Any] =z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: __lowerCamelCase : Dict =min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis __lowerCamelCase : int =self.remap_to_used(__A ) __lowerCamelCase : Optional[Any] =min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: 
__lowerCamelCase : Dict =min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def __lowercase ( self :Union[str, Any] , __lowercase :Dict , __lowercase :Optional[Any] ): if self.remap is not None: __lowerCamelCase : Any =indices.reshape(shape[0] , -1 ) # add batch axis __lowerCamelCase : List[str] =self.unmap_to_all(__A ) __lowerCamelCase : Union[str, Any] =indices.reshape(-1 ) # flatten again # get quantized latent vectors __lowerCamelCase : Dict =self.embedding(__A ) if shape is not None: __lowerCamelCase : Dict =z_q.view(__A ) # reshape back to match original input shape __lowerCamelCase : List[Any] =z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ): """simple docstring""" def __init__( self :List[Any] , __lowercase :List[str] , __lowercase :Dict=False ): __lowerCamelCase : List[str] =parameters __lowerCamelCase , __lowerCamelCase : Union[str, Any] =torch.chunk(__A , 2 , dim=1 ) __lowerCamelCase : int =torch.clamp(self.logvar , -30.0 , 20.0 ) __lowerCamelCase : Union[str, Any] =deterministic __lowerCamelCase : Any =torch.exp(0.5 * self.logvar ) __lowerCamelCase : Union[str, Any] =torch.exp(self.logvar ) if self.deterministic: __lowerCamelCase : Optional[Any] =torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def __lowercase ( self :Union[str, Any] , __lowercase :Optional[torch.Generator] = None ): __lowerCamelCase : List[Any] =randn_tensor( self.mean.shape , generator=__A , device=self.parameters.device , dtype=self.parameters.dtype ) __lowerCamelCase : str =self.mean + self.std * sample return x def __lowercase ( self :str , __lowercase :Any=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def __lowercase ( self :str , __lowercase :Optional[int] , __lowercase :List[Any]=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) __lowerCamelCase : Union[str, Any] =np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=__A ) def __lowercase ( self :Any ): return self.mean
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = 42 class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ): @register_to_config def __init__( self :Union[str, Any] , __A :int = 3 , __A :int = 3 , __A :Tuple[str] = ("DownEncoderBlock2D",) , __A :Tuple[str] = ("UpDecoderBlock2D",) , __A :Tuple[int] = (64,) , __A :int = 1 , __A :str = "silu" , __A :int = 3 , __A :int = 32 , __A :int = 256 , __A :int = 32 , __A :Optional[int] = None , __A :float = 0.1_8_2_1_5 , __A :str = "group" , ) -> Any: """simple docstring""" super().__init__() # pass init params to Encoder SCREAMING_SNAKE_CASE__ = Encoder( in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , ) SCREAMING_SNAKE_CASE__ = vq_embed_dim if vq_embed_dim is not None else latent_channels SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = VectorQuantizer(__A , __A , beta=0.2_5 , remap=__A , sane_index_shape=__A ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) # pass init params to Decoder SCREAMING_SNAKE_CASE__ = Decoder( in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , ) @apply_forward_hook def _snake_case ( self :Union[str, Any] , __A :torch.FloatTensor , __A :bool = True ) -> VQEncoderOutput: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.encoder(__A ) SCREAMING_SNAKE_CASE__ = self.quant_conv(__A ) if not return_dict: return (h,) return VQEncoderOutput(latents=__A ) @apply_forward_hook def _snake_case ( self :Tuple , __A :torch.FloatTensor , __A :bool = False , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" if not force_not_quantize: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.quantize(__A ) else: SCREAMING_SNAKE_CASE__ = h SCREAMING_SNAKE_CASE__ = self.post_quant_conv(__A ) SCREAMING_SNAKE_CASE__ = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=__A ) def _snake_case ( self :int , __A :torch.FloatTensor , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" SCREAMING_SNAKE_CASE__ = sample SCREAMING_SNAKE_CASE__ = self.encode(__A ).latents SCREAMING_SNAKE_CASE__ = self.decode(__A ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__A )
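# A round-trip sketch through the autoencoder above, assuming a recent `diffusers`
# release where `VQModel`'s default config accepts 3-channel inputs; weights here are
# random, so the reconstruction is only a shape check.
import torch
from diffusers import VQModel

model = VQModel()
model.eval()
with torch.no_grad():
    pixels = torch.randn(1, 3, 32, 32)
    latents = model.encode(pixels).latents  # encoder + quant_conv, pre-quantization
    recon = model.decode(latents).sample    # quantize -> post_quant_conv -> decoder
print(recon.shape)  # expected to match the input shape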
from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Dict =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Any ={ '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class A_ ( UpperCamelCase__ ): _A :List[str] = '''vit_mae''' def __init__( self : int , snake_case__ : str=7_68 , snake_case__ : str=12 , snake_case__ : List[Any]=12 , snake_case__ : Optional[Any]=30_72 , snake_case__ : Optional[int]="gelu" , snake_case__ : List[str]=0.0 , snake_case__ : Optional[int]=0.0 , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-12 , snake_case__ : List[Any]=2_24 , snake_case__ : Any=16 , snake_case__ : Tuple=3 , snake_case__ : Optional[Any]=True , snake_case__ : int=16 , snake_case__ : int=5_12 , snake_case__ : int=8 , snake_case__ : List[str]=20_48 , snake_case__ : Any=0.75 , snake_case__ : Dict=False , **snake_case__ : int , ): super().__init__(**__A ) lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = intermediate_size lowercase = hidden_act lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = initializer_range lowercase = layer_norm_eps lowercase = image_size lowercase = patch_size lowercase = num_channels lowercase = qkv_bias lowercase = decoder_num_attention_heads lowercase = decoder_hidden_size lowercase = decoder_num_hidden_layers lowercase = decoder_intermediate_size lowercase = mask_ratio lowercase = norm_pix_loss
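# A short sketch of driving this config, assuming the standard `transformers`
# ViT-MAE classes; the numbers below are the defaults spelled out for clarity.
from transformers import ViTMAEConfig, ViTMAEForPreTraining

config = ViTMAEConfig(image_size=224, patch_size=16, mask_ratio=0.75)
model = ViTMAEForPreTraining(config)
num_patches = (config.image_size // config.patch_size) ** 2  # 196
visible = int(num_patches * (1 - config.mask_ratio))         # ~49 patches reach the encoder
print(num_patches, visible)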
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = 42 lowerCamelCase_ = jnp.floataa lowerCamelCase_ = True def _snake_case ( self :Tuple ) -> Optional[Any]: """simple docstring""" super().setup() SCREAMING_SNAKE_CASE__ = nn.Dense(5 , dtype=self.dtype ) def __call__( self :List[Any] , *__A :int , **__A :Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super().__call__(*__A , **__A ) SCREAMING_SNAKE_CASE__ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = FlaxBigBirdForNaturalQuestionsModule def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple ): def cross_entropy(UpperCamelCase__: List[str] , UpperCamelCase__: List[str] , UpperCamelCase__: List[str]=None ): SCREAMING_SNAKE_CASE__ = logits.shape[-1] SCREAMING_SNAKE_CASE__ = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" ) SCREAMING_SNAKE_CASE__ = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 ) SCREAMING_SNAKE_CASE__ = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: SCREAMING_SNAKE_CASE__ = reduction(UpperCamelCase__ ) return loss SCREAMING_SNAKE_CASE__ = partial(UpperCamelCase__ , reduction=jnp.mean ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class UpperCamelCase_ : lowerCamelCase_ = "google/bigbird-roberta-base" lowerCamelCase_ = 30_00 lowerCamelCase_ = 1_05_00 lowerCamelCase_ = 1_28 lowerCamelCase_ = 3 lowerCamelCase_ = 1 lowerCamelCase_ = 5 # tx_args lowerCamelCase_ = 3e-5 lowerCamelCase_ = 0.0 lowerCamelCase_ = 2_00_00 lowerCamelCase_ = 0.0095 lowerCamelCase_ = "bigbird-roberta-natural-questions" lowerCamelCase_ = "training-expt" lowerCamelCase_ = "data/nq-training.jsonl" lowerCamelCase_ = "data/nq-validation.jsonl" def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" os.makedirs(self.base_dir , exist_ok=__A ) SCREAMING_SNAKE_CASE__ = os.path.join(self.base_dir , self.save_dir ) SCREAMING_SNAKE_CASE__ = self.batch_size_per_device * jax.device_count() @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 40_96 # no dynamic padding on TPUs def __call__( self :Optional[Any] , __A :Optional[int] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.collate_fn(__A ) SCREAMING_SNAKE_CASE__ = jax.tree_util.tree_map(__A , __A ) return batch def _snake_case ( self :List[Any] , __A :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.fetch_inputs(features["""input_ids"""] ) SCREAMING_SNAKE_CASE__ = { 
"""input_ids""": jnp.array(__A , dtype=jnp.intaa ), """attention_mask""": jnp.array(__A , dtype=jnp.intaa ), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ), } return batch def _snake_case ( self :Tuple , __A :list ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self._fetch_inputs(__A ) for ids in input_ids] return zip(*__A ) def _snake_case ( self :List[str] , __A :list ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [1 for _ in range(len(__A ) )] while len(__A ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: List[str] , UpperCamelCase__: Optional[Any]=None ): if seed is not None: SCREAMING_SNAKE_CASE__ = dataset.shuffle(seed=UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) // batch_size ): SCREAMING_SNAKE_CASE__ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(UpperCamelCase__ ) @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict , UpperCamelCase__: Optional[int] , **UpperCamelCase__: Optional[int] ): def loss_fn(UpperCamelCase__: List[Any] ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs return state.loss_fn( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = jax.random.split(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.value_and_grad(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = grad_fn(state.params ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean(UpperCamelCase__ , """batch""" ) SCREAMING_SNAKE_CASE__ = state.apply_gradients(grads=UpperCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , **UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = model_inputs.pop("""start_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""end_labels""" ) SCREAMING_SNAKE_CASE__ = model_inputs.pop("""pooled_labels""" ) SCREAMING_SNAKE_CASE__ = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs SCREAMING_SNAKE_CASE__ = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class UpperCamelCase_ ( train_state.TrainState ): lowerCamelCase_ = struct.field(pytree_node=UpperCamelCase__ ) @dataclass class UpperCamelCase_ : lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = 42 lowerCamelCase_ = None def _snake_case ( self :List[Any] , __A :str , __A :str , __A :str , __A 
:Tuple=None ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = model.params SCREAMING_SNAKE_CASE__ = TrainState.create( apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , ) if ckpt_dir is not None: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = restore_checkpoint(__A , __A ) SCREAMING_SNAKE_CASE__ = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = build_tx(**__A ) SCREAMING_SNAKE_CASE__ = train_state.TrainState( step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , ) SCREAMING_SNAKE_CASE__ = args SCREAMING_SNAKE_CASE__ = data_collator SCREAMING_SNAKE_CASE__ = lr SCREAMING_SNAKE_CASE__ = params SCREAMING_SNAKE_CASE__ = jax_utils.replicate(__A ) return state def _snake_case ( self :Optional[Any] , __A :Optional[int] , __A :int , __A :int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.args SCREAMING_SNAKE_CASE__ = len(__A ) // args.batch_size SCREAMING_SNAKE_CASE__ = jax.random.PRNGKey(0 ) SCREAMING_SNAKE_CASE__ = jax.random.split(__A , jax.device_count() ) for epoch in range(args.max_epochs ): SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , args.batch_size , seed=__A ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc=f'''Running EPOCH-{epoch}''' ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.train_step_fn(__A , __A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 if i % args.logging_steps == 0: SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(state.step ) SCREAMING_SNAKE_CASE__ = running_loss.item() / i SCREAMING_SNAKE_CASE__ = self.scheduler_fn(state_step - 1 ) SCREAMING_SNAKE_CASE__ = self.evaluate(__A , __A ) SCREAMING_SNAKE_CASE__ = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(__A ) ) self.logger.log(__A , commit=__A ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=__A ) def _snake_case ( self :List[str] , __A :Dict , __A :str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = get_batched_dataset(__A , self.args.batch_size ) SCREAMING_SNAKE_CASE__ = len(__A ) // self.args.batch_size SCREAMING_SNAKE_CASE__ = jnp.array(0 , dtype=jnp.floataa ) SCREAMING_SNAKE_CASE__ = 0 for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ): SCREAMING_SNAKE_CASE__ = self.data_collator(__A ) SCREAMING_SNAKE_CASE__ = self.val_step_fn(__A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 return running_loss / i def _snake_case ( self :List[Any] , __A :Any , __A :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = jax_utils.unreplicate(__A ) print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... 
""" ) self.model_save_fn(__A , params=state.params ) with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) ) joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) ) with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f: json.dump({"""step""": state.step.item()} , __A ) print("""DONE""" ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ ) with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.params , f.read() ) with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = from_bytes(state.opt_state , f.read() ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) ) SCREAMING_SNAKE_CASE__ = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) ) with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f: SCREAMING_SNAKE_CASE__ = json.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: List[Any] , UpperCamelCase__: Dict ): SCREAMING_SNAKE_CASE__ = num_train_steps - warmup_steps SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1e-7 , transition_steps=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple , UpperCamelCase__: Tuple ): def weight_decay_mask(UpperCamelCase__: Any ): SCREAMING_SNAKE_CASE__ = traverse_util.flatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ ) return tx, lr
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : List[Any] = LongformerTokenizer __UpperCAmelCase : List[str] = True __UpperCAmelCase : Optional[Any] = LongformerTokenizerFast __UpperCAmelCase : Tuple = True def __lowercase ( self : int ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _a : Optional[Any] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] _a : Optional[Any] = dict(zip(__A ,range(len(__A ) ) ) ) _a : Any = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] _a : List[str] = {'unk_token': '<unk>'} _a : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) _a : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__A ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__A ) ) def __lowercase ( self : str ,**_a : Tuple ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__A ) def __lowercase ( self : Union[str, Any] ,**_a : str ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**__A ) def __lowercase ( self : List[Any] ,_a : Optional[Any] ): '''simple docstring''' _a : Dict = 'lower newer' _a : Any = 'lower newer' return input_text, output_text def __lowercase ( self : str ): '''simple docstring''' _a : Any = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) _a : Union[str, Any] = 'lower newer' _a : Any = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] _a : Dict = tokenizer.tokenize(__A ) # , add_prefix_space=True) self.assertListEqual(__A ,__A ) _a : Any = tokens + [tokenizer.unk_token] _a : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) ,__A ) def __lowercase ( self : int ): '''simple docstring''' _a : Tuple = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' ,add_special_tokens=__A ) ,[0, 3_1414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' ,add_special_tokens=__A ) ,[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] ,) @slow def __lowercase ( self : Any ): '''simple docstring''' _a : Tuple = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) _a : Optional[int] = tokenizer.encode('sequence builders' ,add_special_tokens=__A ) _a : Dict = tokenizer.encode('multi-sequence build' ,add_special_tokens=__A ) _a : List[str] = tokenizer.encode( 'sequence builders' ,add_special_tokens=__A ,add_prefix_space=__A ) _a : Union[str, Any] = tokenizer.encode( 'sequence builders' ,'multi-sequence build' ,add_special_tokens=__A ,add_prefix_space=__A ) _a : Any = tokenizer.build_inputs_with_special_tokens(__A ) _a : Any = tokenizer.build_inputs_with_special_tokens(__A ,__A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def __lowercase ( self : List[str] ): '''simple docstring''' _a : List[str] = self.get_tokenizer() _a : List[str] = 'Encode this sequence.' _a : Dict = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments _a : Optional[Any] = tokenizer.encode(__A ,add_special_tokens=__A ,add_prefix_space=__A ) _a : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__A ,__A ) _a : Union[str, Any] = tokenizer.encode(__A ,add_special_tokens=__A ,add_prefix_space=__A ) _a : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__A ,__A ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) _a : List[Any] = tokenizer.encode(__A ,add_special_tokens=__A ) _a : Tuple = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__A ,__A ) # Testing spaces after special tokens _a : Union[str, Any] = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(__A ,lstrip=__A ,rstrip=__A )} ) # mask token has a left space _a : Any = tokenizer.convert_tokens_to_ids(__A ) _a : int = 'Encode <mask> sequence' _a : Any = 'Encode <mask>sequence' _a : str = tokenizer.encode(__A ) _a : Union[str, Any] = encoded.index(__A ) _a : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__A ,__A ) _a : Any = tokenizer.encode(__A ) _a : Optional[int] = encoded.index(__A ) _a : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__A ,__A ) def __lowercase ( self : Tuple ): '''simple docstring''' pass def __lowercase ( self : List[str] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : str = self.rust_tokenizer_class.from_pretrained(__A ,**__A ) _a : str = self.tokenizer_class.from_pretrained(__A ,**__A ) _a : Optional[int] = 'A, <mask> AllenNLP sentence.' 
_a : int = tokenizer_r.encode_plus(__A ,add_special_tokens=__A ,return_token_type_ids=__A ) _a : Dict = tokenizer_p.encode_plus(__A ,add_special_tokens=__A ,return_token_type_ids=__A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,) _a : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) _a : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( __A ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( __A ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def __lowercase ( self : Optional[int] ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ): _a : str = self.rust_tokenizer_class.from_pretrained( self.tmpdirname ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A ) _a : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) _a : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] ,__A ) self.assertEqual(post_processor_state['add_prefix_space'] ,__A ) self.assertEqual(post_processor_state['trim_offsets'] ,__A ) def __lowercase ( self : Optional[int] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : Any = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` _a : Optional[Any] = F"""{text_of_1_token} {text_of_1_token}""" _a : Optional[Any] = self.rust_tokenizer_class.from_pretrained( __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A ) _a : List[str] = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(__A )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(__A ) + 1, len(__A ) + 1 + len(__A )) ,) _a : int = self.rust_tokenizer_class.from_pretrained( __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A ) _a : Optional[int] = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(__A )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(__A ) + 1, len(__A ) + 1 + len(__A )) ,) _a : Optional[Any] = self.rust_tokenizer_class.from_pretrained( __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A ) _a : Tuple = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(__A )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(__A ), len(__A ) + 1 + len(__A )) ,) _a : List[str] = self.rust_tokenizer_class.from_pretrained( __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A ) _a : Any = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(__A )) ) self.assertEqual( 
encoding.offset_mapping[1] ,(len(__A ), len(__A ) + 1 + len(__A )) ,) _a : Optional[Any] = F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) _a : Optional[int] = self.rust_tokenizer_class.from_pretrained( __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A ) _a : str = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(__A )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(__A ) + 1, 1 + len(__A ) + 1 + len(__A )) ,) _a : Optional[Any] = self.rust_tokenizer_class.from_pretrained( __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A ) _a : Union[str, Any] = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(__A )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) ,) _a : List[Any] = self.rust_tokenizer_class.from_pretrained( __A ,use_fast=__A ,add_prefix_space=__A ,trim_offsets=__A ) _a : Optional[Any] = tokenizer_r(__A ,return_offsets_mapping=__A ,add_special_tokens=__A ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(__A )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(__A ), 1 + len(__A ) + 1 + len(__A )) ,)
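# The offset-mapping assertions above are easier to read next to a concrete run;
# this sketch assumes hub access to the same 'allenai/longformer-base-4096'
# checkpoint the tests use, and that 'hello' is a single token in its vocabulary.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("allenai/longformer-base-4096", use_fast=True)
enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.tokens())           # ['hello', 'Ġhello'] -- 'Ġ' marks the leading space
print(enc["offset_mapping"])  # [(0, 5), (6, 11)] with the default trim_offsets=True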
from torch import nn


def SCREAMING_SNAKE_CASE__ ( act_fn: str ):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''' )
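# Usage is a one-liner; exercising the factory defined above:
import torch

act = SCREAMING_SNAKE_CASE__("silu")        # returns nn.SiLU()
print(act(torch.tensor([-1.0, 0.0, 1.0])))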
'''simple docstring''' import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging __lowerCAmelCase =logging.get_logger(__name__) logging.set_verbosity_info() def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]: """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: a_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ ) a_ , a_ = XLMProphetNetForConditionalGeneration.from_pretrained( UpperCamelCase__ , output_loading_info=UpperCamelCase__ ) else: a_ = ProphetNetForConditionalGenerationOld.from_pretrained(UpperCamelCase__ ) a_ , a_ = ProphetNetForConditionalGeneration.from_pretrained( UpperCamelCase__ , output_loading_info=UpperCamelCase__ ) a_ = ['key_proj', 'value_proj', 'query_proj'] a_ = { 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: a_ = key.split('.' ) if attributes[0] == "lm_head": a_ = prophet a_ = prophet_old else: a_ = prophet.prophetnet a_ = prophet_old.model a_ = False for attribute in attributes: if attribute in mapping: a_ = mapping[attribute] if not hasattr(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) > 0: a_ = attribute elif hasattr(UpperCamelCase__ , UpperCamelCase__ ): a_ = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" a_ = old_model.weight logger.info(F'''{attribute} is initialized.''' ) a_ = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
a_ = old_model.bias logger.info(F'''{attribute} is initialized''' ) a_ = True break elif attribute in special_keys and hasattr(UpperCamelCase__ , 'in_proj_weight' ): a_ = old_model.in_proj_weight.shape[0] // 3 a_ = getattr(UpperCamelCase__ , UpperCamelCase__ ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": a_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) a_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": a_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) a_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": a_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) a_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) a_ = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings." a_ = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] ) a_ = True break if attribute.isdigit(): a_ = model[int(UpperCamelCase__ )] a_ = old_model[int(UpperCamelCase__ )] else: a_ = getattr(UpperCamelCase__ , UpperCamelCase__ ) if old_attribute == "": a_ = old_model else: if not hasattr(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError(F'''{old_model} does not have {old_attribute}''' ) a_ = getattr(UpperCamelCase__ , UpperCamelCase__ ) if not is_key_init: raise ValueError(F'''{key} was not correctly initialized!''' ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase =parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
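# The fiddliest step in the converter above is splitting the fused in_proj_weight
# into query/key/value thirds; a minimal sketch of that slicing with a toy embed_dim.
import torch
from torch import nn

dim = 8  # illustrative; the converter derives it as in_proj_weight.shape[0] // 3
in_proj_weight = torch.randn(3 * dim, dim)
q_w = nn.Parameter(in_proj_weight[:dim, :])
k_w = nn.Parameter(in_proj_weight[dim : 2 * dim, :])
v_w = nn.Parameter(in_proj_weight[2 * dim :, :])
assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), in_proj_weight)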
import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) SCREAMING_SNAKE_CASE__ = MaskFormerConfig(backbone_config=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 847 SCREAMING_SNAKE_CASE__ = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 150 SCREAMING_SNAKE_CASE__ = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 171 SCREAMING_SNAKE_CASE__ = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO SCREAMING_SNAKE_CASE__ = 133 SCREAMING_SNAKE_CASE__ = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 19 SCREAMING_SNAKE_CASE__ = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok SCREAMING_SNAKE_CASE__ = 65 SCREAMING_SNAKE_CASE__ = """mapillary-vistas-id2label.json""" SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} return config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) 
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", 
"""model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) 
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') ) rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] ): SCREAMING_SNAKE_CASE__ = dct.pop(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = val def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Union[str, Any] ): SCREAMING_SNAKE_CASE__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): SCREAMING_SNAKE_CASE__ = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[:dim, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[: dim] SCREAMING_SNAKE_CASE__ = in_proj_weight[ dim : dim * 2, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[ dim : dim * 2 ] SCREAMING_SNAKE_CASE__ = in_proj_weight[ -dim :, : ] SCREAMING_SNAKE_CASE__ = in_proj_bias[-dim :] # fmt: on def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int , UpperCamelCase__: Optional[Any] ): # fmt: off SCREAMING_SNAKE_CASE__ = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) SCREAMING_SNAKE_CASE__ = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE__ = in_proj_weight[: hidden_size, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[:config.hidden_size] SCREAMING_SNAKE_CASE__ = in_proj_weight[hidden_size : hidden_size * 2, :] SCREAMING_SNAKE_CASE__ = in_proj_bias[hidden_size : hidden_size * 2] SCREAMING_SNAKE_CASE__ = in_proj_weight[-hidden_size :, :] 
SCREAMING_SNAKE_CASE__ = in_proj_bias[-hidden_size :] # fmt: on def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" SCREAMING_SNAKE_CASE__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: bool = False ): SCREAMING_SNAKE_CASE__ = get_maskformer_config(UpperCamelCase__ ) # load original state_dict with open(UpperCamelCase__ , """rb""" ) as f: SCREAMING_SNAKE_CASE__ = pickle.load(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys SCREAMING_SNAKE_CASE__ = create_rename_keys(UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_swin_q_k_v(UpperCamelCase__ , config.backbone_config ) read_in_decoder_q_k_v(UpperCamelCase__ , UpperCamelCase__ ) # update to torch tensors for key, value in state_dict.items(): SCREAMING_SNAKE_CASE__ = torch.from_numpy(UpperCamelCase__ ) # load 🤗 model SCREAMING_SNAKE_CASE__ = MaskFormerForInstanceSegmentation(UpperCamelCase__ ) model.eval() for name, param in model.named_parameters(): print(UpperCamelCase__ , param.shape ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(UpperCamelCase__ ) == 0, f'''Unexpected keys: {unexpected_keys}''' # verify results SCREAMING_SNAKE_CASE__ = prepare_img() if "vistas" in model_name: SCREAMING_SNAKE_CASE__ = 65 elif "cityscapes" in model_name: SCREAMING_SNAKE_CASE__ = 65_535 else: SCREAMING_SNAKE_CASE__ = 255 SCREAMING_SNAKE_CASE__ = True if """ade""" in model_name else False SCREAMING_SNAKE_CASE__ = MaskFormerImageProcessor(ignore_index=UpperCamelCase__ , reduce_labels=UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = image_processor(UpperCamelCase__ , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE__ = model(**UpperCamelCase__ ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": SCREAMING_SNAKE_CASE__ = torch.tensor( [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(f'''nielsr/{model_name}''' ) image_processor.push_to_hub(f'''nielsr/{model_name}''' ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( 
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowerCamelCase = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
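# Most of the script above is the rename table; the rename_key helper it feeds is a
# pop-and-reinsert, sketched standalone here with one entry from that table.
import torch

def rename_key(state_dict: dict, old: str, new: str) -> None:
    state_dict[new] = state_dict.pop(old)  # move the tensor to its new key

sd = {"backbone.patch_embed.norm.weight": torch.ones(96)}
rename_key(sd, "backbone.patch_embed.norm.weight",
           "model.pixel_level_module.encoder.model.embeddings.norm.weight")
print(list(sd))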
"""simple docstring""" from torch import nn class SCREAMING_SNAKE_CASE__ ( nn.Module ): def __init__( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any]): """simple docstring""" super().__init__() lowercase_ = class_size lowercase_ = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) lowercase_ = nn.Linear(__A , __A) def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[Any]): """simple docstring""" lowercase_ = self.mlp(__A) return logits
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { 'nielsr/canine-s': 2048, } # Unicode defines 1,114,112 total “codepoints” _lowerCamelCase = 1114112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _lowerCamelCase = 0 _lowerCamelCase = 0XE0_00 _lowerCamelCase = 0XE0_01 _lowerCamelCase = 0XE0_02 _lowerCamelCase = 0XE0_03 _lowerCamelCase = 0XE0_04 # Maps special codepoints to human-readable names. _lowerCamelCase = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _lowerCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self :str , __A :str=chr(__A ) , __A :str=chr(__A ) , __A :Dict=chr(__A ) , __A :str=chr(__A ) , __A :Union[str, Any]=chr(__A ) , __A :str=chr(__A ) , __A :int=False , __A :int=2048 , **__A :Dict , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , model_max_length=__A , **__A , ) # Creates a mapping for looking up the IDs of special symbols. SCREAMING_SNAKE_CASE__ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): SCREAMING_SNAKE_CASE__ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
SCREAMING_SNAKE_CASE__ = { codepoint: name for name, codepoint in self._special_codepoints.items() } SCREAMING_SNAKE_CASE__ = UNICODE_VOCAB_SIZE SCREAMING_SNAKE_CASE__ = len(self._special_codepoints ) @property def _snake_case ( self :Optional[Any] ) -> int: """simple docstring""" return self._unicode_vocab_size def _snake_case ( self :Tuple , __A :str ) -> List[str]: """simple docstring""" return list(__A ) def _snake_case ( self :Optional[Any] , __A :str ) -> int: """simple docstring""" try: return ord(__A ) except TypeError: raise ValueError(f'''invalid token: \'{token}\'''' ) def _snake_case ( self :str , __A :int ) -> str: """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(__A ) except TypeError: raise ValueError(f'''invalid id: {index}''' ) def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Any: """simple docstring""" return "".join(__A ) def _snake_case ( self :Optional[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) SCREAMING_SNAKE_CASE__ = [1] + ([0] * len(__A )) + [1] if token_ids_a is not None: result += ([0] * len(__A )) + [1] return result def _snake_case ( self :List[str] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def _snake_case ( self :int , __A :str , __A :Optional[str] = None ) -> Any: """simple docstring""" return ()
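# The tokenizer above reduces to ord()/chr() plus a handful of private-use special
# codepoints; the character-level round trip is easy to check by hand.
text = "héllo"
ids = [ord(ch) for ch in text]
print(ids)                           # [104, 233, 108, 108, 111]
print("".join(chr(i) for i in ids))  # 'héllo'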
import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig lowerCamelCase : int = logging.get_logger(__name__) class _UpperCamelCase : def __init__( self , __UpperCamelCase , __UpperCamelCase )-> Union[str, Any]: __lowerCAmelCase = question_encoder __lowerCAmelCase = generator __lowerCAmelCase = self.question_encoder def __UpperCAmelCase ( self , __UpperCamelCase )-> Optional[int]: if os.path.isfile(__A ): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(__A , exist_ok=__A ) __lowerCAmelCase = os.path.join(__A , "question_encoder_tokenizer" ) __lowerCAmelCase = os.path.join(__A , "generator_tokenizer" ) self.question_encoder.save_pretrained(__A ) self.generator.save_pretrained(__A ) @classmethod def __UpperCAmelCase ( cls , __UpperCamelCase , **__UpperCamelCase )-> Union[str, Any]: from ..auto.tokenization_auto import AutoTokenizer __lowerCAmelCase = kwargs.pop("config" , __A ) if config is None: __lowerCAmelCase = RagConfig.from_pretrained(__A ) __lowerCAmelCase = AutoTokenizer.from_pretrained( __A , config=config.question_encoder , subfolder="question_encoder_tokenizer" ) __lowerCAmelCase = AutoTokenizer.from_pretrained( __A , config=config.generator , subfolder="generator_tokenizer" ) return cls(question_encoder=__A , generator=__A ) def __call__( self , *__UpperCamelCase , **__UpperCamelCase )-> str: return self.current_tokenizer(*__A , **__A ) def __UpperCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase )-> List[str]: return self.generator.batch_decode(*__A , **__A ) def __UpperCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase )-> Optional[Any]: return self.generator.decode(*__A , **__A ) def __UpperCAmelCase ( self )-> Any: __lowerCAmelCase = self.question_encoder def __UpperCAmelCase ( self )-> int: __lowerCAmelCase = self.generator def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = "longest" , __UpperCamelCase = None , __UpperCamelCase = True , **__UpperCamelCase , )-> BatchEncoding: warnings.warn( "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the " "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` " "context manager to prepare your targets. See the documentation of your specific tokenizer for more " "details" , __A , ) if max_length is None: __lowerCAmelCase = self.current_tokenizer.model_max_length __lowerCAmelCase = self( __A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: __lowerCAmelCase = self.current_tokenizer.model_max_length __lowerCAmelCase = self( text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , ) __lowerCAmelCase = labels["input_ids"] return model_inputs
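# The wrapper's trick is a single current_tokenizer attribute that __call__ forwards
# to; a toy, runnable restatement (the DualTokenizer name and the lambdas are ours,
# standing in for real question-encoder and generator tokenizers).
class DualTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = question_encoder

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

tok = DualTokenizer(lambda s: f"q:{s}", lambda s: f"g:{s}")
print(tok("hello"))                    # 'q:hello'
tok.current_tokenizer = tok.generator
print(tok("hello"))                    # 'g:hello'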
367
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) _lowerCamelCase = 'bert-base-cased' _lowerCamelCase = 'fp16' _lowerCamelCase = 'bf16' _lowerCamelCase = [FPaa, BFaa] @require_fsdp @require_cuda class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Optional[Any] ) -> Optional[Any]: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def _snake_case ( self :List[Any] ) -> Tuple: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = f'''{i + 1}''' SCREAMING_SNAKE_CASE__ = strategy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def _snake_case ( self :int ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = prefetch_policy with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def _snake_case ( self :List[str] ) -> List[str]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__A ): SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = state_dict_type with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def _snake_case ( self :str ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = AutoModel.from_pretrained(__A ) for policy in FSDP_AUTO_WRAP_POLICY: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = policy if policy == "TRANSFORMER_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """BertLayer""" elif policy == "SIZE_BASED_WRAP": SCREAMING_SNAKE_CASE__ = """2000""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """TRANSFORMER_BASED_WRAP""" 
SCREAMING_SNAKE_CASE__ = """T5Layer""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() with self.assertRaises(__A ) as cm: fsdp_plugin.set_auto_wrap_policy(__A ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = """SIZE_BASED_WRAP""" SCREAMING_SNAKE_CASE__ = """0""" with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__A ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def _snake_case ( self :Optional[Any] ) -> Optional[int]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = mp_dtype with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = Accelerator() if mp_dtype == "fp16": SCREAMING_SNAKE_CASE__ = torch.floataa elif mp_dtype == "bf16": SCREAMING_SNAKE_CASE__ = torch.bfloataa SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , __A ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__A ) def _snake_case ( self :str ) -> str: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: SCREAMING_SNAKE_CASE__ = self.dist_env.copy() SCREAMING_SNAKE_CASE__ = str(__A ).lower() with mockenv_context(**__A ): SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) ) @require_fsdp @require_multi_gpu @slow class UpperCamelCase_ ( UpperCamelCase__ ): def _snake_case ( self :Any ) -> Any: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ = 0.8_2 SCREAMING_SNAKE_CASE__ = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] SCREAMING_SNAKE_CASE__ = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = 160 SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils ) SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def _snake_case ( self :Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" ) SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: SCREAMING_SNAKE_CASE__ = cmd.copy() for i, strategy in enumerate(__A ): if strategy.lower() in config: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--performance_lower_bound={self.performance_lower_bound}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Dict ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(__A ): SCREAMING_SNAKE_CASE__ = cmd.copy() cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) if strategy != "FULL_SHARD": continue SCREAMING_SNAKE_CASE__ = len(__A ) for state_dict_type in FSDP_STATE_DICT_TYPE: SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index] cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) SCREAMING_SNAKE_CASE__ = cmd_config[:-1] SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ f'''--resume_from_checkpoint={resume_from_checkpoint}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() ) def _snake_case ( self :Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) SCREAMING_SNAKE_CASE__ = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): SCREAMING_SNAKE_CASE__ = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, 
strategy in enumerate(__A ): if strategy.lower() in spec: cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f'''--output_dir={self.tmpdir}''', f'''--peak_memory_upper_bound={peak_mem_upper_bound}''', f'''--n_train={self.n_train}''', f'''--n_val={self.n_val}''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__A , env=os.environ.copy() )
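# The tests above drive FSDP configuration entirely through environment variables.
# A minimal sketch of that pattern using only the standard library (accelerate's
# mockenv_context wraps the same idea); the variable names mirror the ones used
# above, and the sharding value "1" is just an illustrative choice.
import os
from unittest import mock

env = {"ACCELERATE_USE_FSDP": "true", "FSDP_SHARDING_STRATEGY": "1"}
with mock.patch.dict(os.environ, env):
    # Inside the context, plugin-style code can read its settings from os.environ.
    assert os.environ["FSDP_SHARDING_STRATEGY"] == "1"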
6
0
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py a__ : Any = """src/diffusers""" a__ : Optional[int] = """.""" # This is to make sure the diffusers module imported is the one in the repo. a__ : List[str] = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) a__ : str = spec.loader.load_module() def snake_case (UpperCamelCase : int , UpperCamelCase : Optional[int] ): '''simple docstring''' return line.startswith(UpperCamelCase__ ) or len(UpperCamelCase__ ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCamelCase__ ) is not None def snake_case (UpperCamelCase : Any ): '''simple docstring''' lowerCamelCase__ = object_name.split(""".""" ) lowerCamelCase__ = 0 # First let's find the module where our object lives. lowerCamelCase__ = parts[i] while i < len(UpperCamelCase__ ) and not os.path.isfile(os.path.join(UpperCamelCase__ , f'''{module}.py''' ) ): i += 1 if i < len(UpperCamelCase__ ): lowerCamelCase__ = os.path.join(UpperCamelCase__ , parts[i] ) if i >= len(UpperCamelCase__ ): raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(UpperCamelCase__ , f'''{module}.py''' ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() # Now let's find the class / func in the code! lowerCamelCase__ = """""" lowerCamelCase__ = 0 for name in parts[i + 1 :]: while ( line_index < len(UpperCamelCase__ ) and re.search(rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(UpperCamelCase__ ): raise ValueError(f''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). lowerCamelCase__ = line_index while line_index < len(UpperCamelCase__ ) and _should_continue(lines[line_index] , UpperCamelCase__ ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowerCamelCase__ = lines[start_index:line_index] return "".join(UpperCamelCase__ ) a__ : List[Any] = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") a__ : int = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""") a__ : List[Any] = re.compile(r"""<FILL\s+[^>]*>""") def snake_case (UpperCamelCase : Dict ): '''simple docstring''' lowerCamelCase__ = code.split("""\n""" ) lowerCamelCase__ = 0 while idx < len(UpperCamelCase__ ) and len(lines[idx] ) == 0: idx += 1 if idx < len(UpperCamelCase__ ): return re.search(r"""^(\s*)\S""" , lines[idx] ).groups()[0] return "" def snake_case (UpperCamelCase : Any ): '''simple docstring''' lowerCamelCase__ = len(get_indent(UpperCamelCase__ ) ) > 0 if has_indent: lowerCamelCase__ = f'''class Bla:\n{code}''' lowerCamelCase__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCamelCase__ ) lowerCamelCase__ = black.format_str(UpperCamelCase__ , mode=UpperCamelCase__ ) lowerCamelCase__ , lowerCamelCase__ = style_docstrings_in_code(UpperCamelCase__ ) return result[len("""class Bla:\n""" ) :] if has_indent else result def snake_case (UpperCamelCase : Optional[Any] , UpperCamelCase : str=False ): '''simple docstring''' with open(UpperCamelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase__ = f.readlines() lowerCamelCase__ = [] lowerCamelCase__ = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(UpperCamelCase__ ): lowerCamelCase__ = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = search.groups() lowerCamelCase__ = find_code_in_diffusers(UpperCamelCase__ ) lowerCamelCase__ = get_indent(UpperCamelCase__ ) lowerCamelCase__ = line_index + 1 if indent == theoretical_indent else line_index + 2 lowerCamelCase__ = theoretical_indent lowerCamelCase__ = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. lowerCamelCase__ = True while line_index < len(UpperCamelCase__ ) and should_continue: line_index += 1 if line_index >= len(UpperCamelCase__ ): break lowerCamelCase__ = lines[line_index] lowerCamelCase__ = _should_continue(UpperCamelCase__ , UpperCamelCase__ ) and re.search(f'''^{indent}# End copy''' , UpperCamelCase__ ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowerCamelCase__ = lines[start_index:line_index] lowerCamelCase__ = """""".join(UpperCamelCase__ ) # Remove any nested `Copied from` comments to avoid circular copies lowerCamelCase__ = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCamelCase__ ) is None] lowerCamelCase__ = """\n""".join(UpperCamelCase__ ) # Before comparing, use the `replace_pattern` on the original code. 
if len(UpperCamelCase__ ) > 0: lowerCamelCase__ = replace_pattern.replace("""with""" , """""" ).split(""",""" ) lowerCamelCase__ = [_re_replace_pattern.search(UpperCamelCase__ ) for p in patterns] for pattern in patterns: if pattern is None: continue lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = pattern.groups() lowerCamelCase__ = re.sub(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if option.strip() == "all-casing": lowerCamelCase__ = re.sub(obja.lower() , obja.lower() , UpperCamelCase__ ) lowerCamelCase__ = re.sub(obja.upper() , obja.upper() , UpperCamelCase__ ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line lowerCamelCase__ = blackify(lines[start_index - 1] + theoretical_code ) lowerCamelCase__ = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: lowerCamelCase__ = lines[:start_index] + [theoretical_code] + lines[line_index:] lowerCamelCase__ = start_index + 1 if overwrite and len(UpperCamelCase__ ) > 0: # Warn the user a file has been modified. print(f'''Detected changes, rewriting {filename}.''' ) with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(UpperCamelCase__ ) return diffs def snake_case (UpperCamelCase : bool = False ): '''simple docstring''' lowerCamelCase__ = glob.glob(os.path.join(UpperCamelCase__ , """**/*.py""" ) , recursive=UpperCamelCase__ ) lowerCamelCase__ = [] for filename in all_files: lowerCamelCase__ = is_copy_consistent(UpperCamelCase__ , UpperCamelCase__ ) diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(UpperCamelCase__ ) > 0: lowerCamelCase__ = """\n""".join(UpperCamelCase__ ) raise Exception( """Found the following copy inconsistencies:\n""" + diff + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" ) if __name__ == "__main__": a__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") a__ : Dict = parser.parse_args() check_copies(args.fix_and_overwrite)
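# A quick illustration (not part of the original script) of the "Copied from"
# marker the checker above scans for; the regex is the one defined above, and the
# class path in the sample line is hypothetical.
import re

_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")

sample = "    # Copied from diffusers.models.unet_2d.UNet2DModel with UNet2D->MyNet"
indent, object_name, replace_pattern = _re_copy_warning.search(sample).groups()
assert object_name == "models.unet_2d.UNet2DModel"  # path after the "diffusers." prefix
assert replace_pattern == "with UNet2D->MyNet"      # optional rename pattern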
165
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _lowerCamelCase = logging.get_logger(__name__) # General docstring _lowerCamelCase = 'PoolFormerConfig' # Base docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = [1, 512, 7, 7] # Image classification docstring _lowerCamelCase = 'sail/poolformer_s12' _lowerCamelCase = 'tabby, tabby cat' _lowerCamelCase = [ 'sail/poolformer_s12', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] , UpperCamelCase__: float = 0.0 , UpperCamelCase__: bool = False ): if drop_prob == 0.0 or not training: return input SCREAMING_SNAKE_CASE__ = 1 - drop_prob SCREAMING_SNAKE_CASE__ = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets SCREAMING_SNAKE_CASE__ = keep_prob + torch.rand(UpperCamelCase__ , dtype=input.dtype , device=input.device ) random_tensor.floor_() # binarize SCREAMING_SNAKE_CASE__ = input.div(UpperCamelCase__ ) * random_tensor return output class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Optional[float] = None ) -> None: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = drop_prob def _snake_case ( self :Any , __A :torch.Tensor ) -> torch.Tensor: """simple docstring""" return drop_path(__A , self.drop_prob , self.training ) def _snake_case ( self :Dict ) -> str: """simple docstring""" return "p={}".format(self.drop_prob ) class UpperCamelCase_ ( nn.Module ): def __init__( self :Dict , __A :Optional[Any] , __A :Dict , __A :List[str] , __A :Optional[Any] , __A :Tuple , __A :Optional[Any]=None ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size) SCREAMING_SNAKE_CASE__ = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride) SCREAMING_SNAKE_CASE__ = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A ) SCREAMING_SNAKE_CASE__ = norm_layer(__A ) if norm_layer else nn.Identity() def _snake_case ( self :Dict , __A :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.projection(__A ) SCREAMING_SNAKE_CASE__ = self.norm(__A ) return embeddings class UpperCamelCase_ ( nn.GroupNorm ): def __init__( self :Dict , __A :Tuple , **__A :Union[str, Any] ) -> Dict: """simple docstring""" super().__init__(1 , __A , **__A ) class UpperCamelCase_ ( nn.Module ): def __init__( self :List[str] , __A :Optional[int] ) -> Any: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A ) def _snake_case ( self :Any , __A :Optional[Any] ) -> Optional[Any]: """simple docstring""" return self.pool(__A ) - hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Optional[Any] , __A :Tuple , __A :Dict , __A :int , 
__A :Any ) -> str: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = nn.Convad(__A , __A , 1 ) SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if isinstance(config.hidden_act , __A ): SCREAMING_SNAKE_CASE__ = ACTaFN[config.hidden_act] else: SCREAMING_SNAKE_CASE__ = config.hidden_act def _snake_case ( self :Union[str, Any] , __A :Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.act_fn(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) SCREAMING_SNAKE_CASE__ = self.conva(__A ) SCREAMING_SNAKE_CASE__ = self.drop(__A ) return hidden_states class UpperCamelCase_ ( nn.Module ): def __init__( self :Any , __A :str , __A :List[str] , __A :Tuple , __A :Dict , __A :Union[str, Any] , __A :int ) -> Optional[int]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = PoolFormerPooling(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerOutput(__A , __A , __A , __A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(__A ) # Useful for training neural nets SCREAMING_SNAKE_CASE__ = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity() SCREAMING_SNAKE_CASE__ = config.use_layer_scale if config.use_layer_scale: SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) SCREAMING_SNAKE_CASE__ = nn.Parameter( config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A ) def _snake_case ( self :Optional[Any] , __A :Optional[int] ) -> str: """simple docstring""" if self.use_layer_scale: SCREAMING_SNAKE_CASE__ = self.pooling(self.before_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = () SCREAMING_SNAKE_CASE__ = self.output(self.after_norm(__A ) ) SCREAMING_SNAKE_CASE__ = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection SCREAMING_SNAKE_CASE__ = hidden_states + self.drop_path(__A ) SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs else: SCREAMING_SNAKE_CASE__ = self.drop_path(self.pooling(self.before_norm(__A ) ) ) # First residual connection SCREAMING_SNAKE_CASE__ = pooling_output + hidden_states SCREAMING_SNAKE_CASE__ = () # Second residual connection inside the PoolFormerOutput block SCREAMING_SNAKE_CASE__ = self.drop_path(self.output(self.after_norm(__A ) ) ) SCREAMING_SNAKE_CASE__ = hidden_states + layer_output SCREAMING_SNAKE_CASE__ = (output,) + outputs return outputs class UpperCamelCase_ ( nn.Module ): def __init__( self :Union[str, Any] , __A :List[Any] ) -> Union[str, Any]: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = config # stochastic depth decay rule SCREAMING_SNAKE_CASE__ = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings SCREAMING_SNAKE_CASE__ = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) # Transformer blocks SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers 
SCREAMING_SNAKE_CASE__ = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__A ) ) SCREAMING_SNAKE_CASE__ = nn.ModuleList(__A ) def _snake_case ( self :str , __A :Tuple , __A :Dict=False , __A :Tuple=True ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = () if output_hidden_states else None SCREAMING_SNAKE_CASE__ = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = layers # Get patch embeddings from hidden_states SCREAMING_SNAKE_CASE__ = embedding_layer(__A ) # Send the embeddings through the blocks for _, blk in enumerate(__A ): SCREAMING_SNAKE_CASE__ = blk(__A ) SCREAMING_SNAKE_CASE__ = layer_outputs[0] if output_hidden_states: SCREAMING_SNAKE_CASE__ = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A ) class UpperCamelCase_ ( UpperCamelCase__ ): lowerCamelCase_ = PoolFormerConfig lowerCamelCase_ = "poolformer" lowerCamelCase_ = "pixel_values" lowerCamelCase_ = True def _snake_case ( self :Optional[Any] , __A :Tuple ) -> Dict: """simple docstring""" if isinstance(__A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__A , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def _snake_case ( self :str , __A :Optional[Any] , __A :Union[str, Any]=False ) -> Any: """simple docstring""" if isinstance(__A , __A ): SCREAMING_SNAKE_CASE__ = value _lowerCamelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _lowerCamelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n' @add_start_docstrings( "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." 
, UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Any ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config SCREAMING_SNAKE_CASE__ = PoolFormerEncoder(__A ) # Initialize weights and apply final processing self.post_init() def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _snake_case ( self :Dict , __A :Optional[torch.FloatTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) SCREAMING_SNAKE_CASE__ = self.encoder( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , ) class UpperCamelCase_ ( nn.Module ): def __init__( self :int , __A :Optional[int] ) -> Tuple: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE__ = nn.Linear(config.hidden_size , config.hidden_size ) def _snake_case ( self :List[Any] , __A :Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.dense(__A ) return output @add_start_docstrings( "\n PoolFormer Model transformer with an image classification head on top\n " , UpperCamelCase__ , ) class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :str , __A :Union[str, Any] ) -> int: """simple docstring""" super().__init__(__A ) SCREAMING_SNAKE_CASE__ = config.num_labels SCREAMING_SNAKE_CASE__ = PoolFormerModel(__A ) # Final norm SCREAMING_SNAKE_CASE__ = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head SCREAMING_SNAKE_CASE__ = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _snake_case ( self :int , __A :Optional[torch.FloatTensor] = None , __A :Optional[torch.LongTensor] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict SCREAMING_SNAKE_CASE__ = self.poolformer( __A , output_hidden_states=__A , return_dict=__A , ) SCREAMING_SNAKE_CASE__ = outputs[0] SCREAMING_SNAKE_CASE__ = self.classifier(self.norm(__A ).mean([-2, -1] ) ) SCREAMING_SNAKE_CASE__ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): SCREAMING_SNAKE_CASE__ = 
"""single_label_classification""" else: SCREAMING_SNAKE_CASE__ = """multi_label_classification""" if self.config.problem_type == "regression": SCREAMING_SNAKE_CASE__ = MSELoss() if self.num_labels == 1: SCREAMING_SNAKE_CASE__ = loss_fct(logits.squeeze() , labels.squeeze() ) else: SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) elif self.config.problem_type == "single_label_classification": SCREAMING_SNAKE_CASE__ = CrossEntropyLoss() SCREAMING_SNAKE_CASE__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": SCREAMING_SNAKE_CASE__ = BCEWithLogitsLoss() SCREAMING_SNAKE_CASE__ = loss_fct(__A , __A ) if not return_dict: SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
6
0
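# A compact numeric sketch (illustrative only) of the stochastic-depth helper the
# PoolFormer file above defines: each sample in the batch is dropped whole with
# probability drop_prob, and survivors are rescaled by 1 / keep_prob so the
# expected value of the output matches the input.
import torch

def drop_path_sketch(x, drop_prob=0.5, training=True):
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)      # broadcast over non-batch dims
    mask = (keep_prob + torch.rand(shape)).floor_()  # 0 or 1 per sample
    return x / keep_prob * mask

x = torch.ones(4, 3)
print(drop_path_sketch(x))  # each row is either all 0 or all 2.0 (= 1 / keep_prob)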
'''simple docstring'''
import os


def __UpperCamelCase ( ):
    with open(os.path.dirname(UpperCamelCase__ ) + '''/p022_names.txt''' ) as file:
        lowercase__ : int = str(file.readlines()[0] )
    lowercase__ : Optional[Any] = names.replace('''\"''' , '''''' ).split(''',''' )
    names.sort()
    lowercase__ : str = 0
    lowercase__ : Optional[Any] = 0
    for i, name in enumerate(UpperCamelCase__ ):
        for letter in name:
            name_score += ord(UpperCamelCase__ ) - 64
        total_score += (i + 1) * name_score
        lowercase__ : int = 0
    return total_score


if __name__ == "__main__":
    print(solution())
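# Worked example of the scoring rule implemented above, taken from the Project
# Euler 22 statement: COLIN is worth 3 + 15 + 12 + 9 + 14 = 53, and as the 938th
# name in the sorted list it contributes 938 * 53 = 49714 to the total.
name = "COLIN"
name_score = sum(ord(letter) - 64 for letter in name)  # A=1, B=2, ...
assert name_score == 53
assert 938 * name_score == 49714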
152
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ ( UpperCamelCase__ ): def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_lengths SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = gelu_activation SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings SCREAMING_SNAKE_CASE__ = causal SCREAMING_SNAKE_CASE__ = asm SCREAMING_SNAKE_CASE__ = n_langs SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = n_special SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = summary_type SCREAMING_SNAKE_CASE__ = use_proj SCREAMING_SNAKE_CASE__ = scope def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None if self.use_input_lengths: SCREAMING_SNAKE_CASE__ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE__ = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float() SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) 
SCREAMING_SNAKE_CASE__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _snake_case ( self :List[str] ) -> Optional[int]: """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _snake_case ( self :Tuple , __A :str , __A :int , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[int] , __A :Union[str, Any] , __A :Union[str, Any] , __A :str , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , lengths=__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A , langs=__A ) SCREAMING_SNAKE_CASE__ = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _snake_case ( self :str , __A :Any , __A :str , __A :Union[str, Any] , __A :Optional[Any] , __A :Optional[int] , __A :Any , __A :Union[str, Any] , __A :Optional[Any] , __A :Union[str, Any] , ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertWithLMHeadModel(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self :Tuple , __A :Union[str, Any] , __A :Optional[Any] , __A :Dict , __A :Dict , __A :Union[str, Any] , __A :List[str] , __A :Optional[int] , __A :int , __A :str , ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnsweringSimple(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self :List[str] , __A :Any , __A :int , __A :Tuple , __A :Optional[Any] , __A :Tuple , __A :Optional[int] , __A :str , __A :int , __A :str , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForQuestionAnswering(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , p_mask=__A , ) SCREAMING_SNAKE_CASE__ = model( __A , start_positions=__A , end_positions=__A , cls_index=__A , is_impossible=__A , ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE__ = model(__A , start_positions=__A , end_positions=__A ) ((SCREAMING_SNAKE_CASE__) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, 
model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def _snake_case ( self :Optional[int] , __A :str , __A :Optional[int] , __A :Tuple , __A :Dict , __A :List[str] , __A :Tuple , __A :List[str] , __A :Dict , __A :List[str] , ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertForSequenceClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A ) SCREAMING_SNAKE_CASE__ = model(__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _snake_case ( self :Optional[Any] , __A :Optional[Any] , __A :Optional[Any] , __A :List[str] , __A :Optional[Any] , __A :int , __A :Tuple , __A :Optional[int] , __A :Union[str, Any] , __A :Dict , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_labels SCREAMING_SNAKE_CASE__ = FlaubertForTokenClassification(__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self :str , __A :Any , __A :Tuple , __A :List[str] , __A :Tuple , __A :Any , __A :int , __A :Dict , __A :List[str] , __A :Tuple , ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.num_choices SCREAMING_SNAKE_CASE__ = FlaubertForMultipleChoice(config=__A ) model.to(__A ) model.eval() SCREAMING_SNAKE_CASE__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() SCREAMING_SNAKE_CASE__ = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self :Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): lowerCamelCase_ = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase_ = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def 
_snake_case ( self :Any , __A :Optional[int] , __A :Optional[int] , __A :Dict , __A :List[Any] , __A :Tuple ) -> str: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _snake_case ( self :Tuple , __A :List[str] , __A :Optional[int] , __A :Dict=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) SCREAMING_SNAKE_CASE__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__A , emb_dim=37 ) def _snake_case ( self :int ) -> int: """simple docstring""" self.config_tester.run_common_tests() def _snake_case ( self :Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__A ) def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__A ) def _snake_case ( self :str ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*__A ) def _snake_case ( self :Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__A ) def _snake_case ( self :str ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__A ) def _snake_case ( self :Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*__A ) def _snake_case ( self :Any ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*__A ) @slow def _snake_case ( self :Union[str, Any] ) -> List[str]: """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @slow @require_torch_gpu def _snake_case ( self :Tuple ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model_class(config=__A ) SCREAMING_SNAKE_CASE__ = self._prepare_for_class(__A , __A ) SCREAMING_SNAKE_CASE__ = torch.jit.trace( __A , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__A , os.path.join(__A , """traced_model.pt""" ) ) SCREAMING_SNAKE_CASE__ = torch.jit.load(os.path.join(__A , """traced_model.pt""" ) , map_location=__A ) loaded(inputs_dict["""input_ids"""].to(__A ) , inputs_dict["""attention_mask"""].to(__A ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _snake_case ( self :Dict ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) SCREAMING_SNAKE_CASE__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE__ = model(__A )[0] SCREAMING_SNAKE_CASE__ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __A ) SCREAMING_SNAKE_CASE__ = torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
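# The GPU test above checks that a traced model can be saved and reloaded. A toy
# version of that round trip, with no transformers dependency and purely for
# illustration:
import os
import tempfile
import torch

model = torch.nn.Linear(4, 2).eval()
example = torch.randn(1, 4)
traced = torch.jit.trace(model, example)

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path)
    assert torch.allclose(loaded(example), model(example))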
6
0
"""simple docstring""" from __future__ import annotations from random import random from typing import Generic, TypeVar lowercase__ = TypeVar("KT") lowercase__ = TypeVar("VT") class SCREAMING_SNAKE_CASE__ ( Generic[KT, VT] ): def __init__(self , _lowercase = "root" , _lowercase = None ): '''simple docstring''' __a : Any = key __a : Tuple = value __a : List[Any] = [] def __repr__(self ): '''simple docstring''' return F'''Node({self.key}: {self.value})''' @property def lowerCAmelCase__(self ): '''simple docstring''' return len(self.forward ) class SCREAMING_SNAKE_CASE__ ( Generic[KT, VT] ): def __init__(self , _lowercase = 0.5 , _lowercase = 16 ): '''simple docstring''' __a : int = Node[KT, VT]() __a : int = 0 __a : List[str] = p __a : Union[str, Any] = max_level def __str__(self ): '''simple docstring''' __a : int = list(self ) if len(__A ) == 0: return F'''SkipList(level={self.level})''' __a : Tuple = max((len(str(__A ) ) for item in items) , default=4 ) __a : Any = max(__A , 4 ) + 4 __a : Optional[int] = self.head __a : Any = [] __a : Any = node.forward.copy() lines.append(F'''[{node.key}]'''.ljust(__A , """-""" ) + """* """ * len(__A ) ) lines.append(""" """ * label_size + """| """ * len(__A ) ) while len(node.forward ) != 0: __a : Dict = node.forward[0] lines.append( F'''[{node.key}]'''.ljust(__A , """-""" ) + """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) ) lines.append(""" """ * label_size + """| """ * len(__A ) ) __a : int = node.forward lines.append("""None""".ljust(__A ) + """* """ * len(__A ) ) return F'''SkipList(level={self.level})\n''' + "\n".join(__A ) def __iter__(self ): '''simple docstring''' __a : Tuple = self.head while len(node.forward ) != 0: yield node.forward[0].key __a : List[str] = node.forward[0] def lowerCAmelCase__(self ): '''simple docstring''' __a : List[Any] = 1 while random() < self.p and level < self.max_level: level += 1 return level def lowerCAmelCase__(self , _lowercase ): '''simple docstring''' __a : Dict = [] __a : Any = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: __a : Dict = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(__A ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def lowerCAmelCase__(self , _lowercase ): '''simple docstring''' __a , __a : Tuple = self._locate_node(__A ) if node is not None: for i, update_node in enumerate(__A ): # Remove or replace all references to removed node. 
if update_node.level > i and update_node.forward[i].key == key: if node.level > i: __a : Dict = node.forward[i] else: __a : List[str] = update_node.forward[:i] def lowerCAmelCase__(self , _lowercase , _lowercase ): '''simple docstring''' __a , __a : Dict = self._locate_node(__A ) if node is not None: __a : str = value else: __a : Any = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , __A ): update_vector.append(self.head ) __a : Any = level __a : int = Node(__A , __A ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(__A ) else: __a : Union[str, Any] = new_node def lowerCAmelCase__(self , _lowercase ): '''simple docstring''' __a , __a : str = self._locate_node(__A ) if node is not None: return node.value return None def __magic_name__ ( ): __a : Any = SkipList() skip_list.insert("""Key1""" , 3 ) skip_list.insert("""Key2""" , 1_2 ) skip_list.insert("""Key3""" , 4_1 ) skip_list.insert("""Key4""" , -1_9 ) __a : Any = skip_list.head __a : Tuple = {} while node.level != 0: __a : List[str] = node.forward[0] __a : Optional[int] = node.value assert len(UpperCamelCase__ ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 1_2 assert all_values["Key3"] == 4_1 assert all_values["Key4"] == -1_9 def __magic_name__ ( ): __a : List[str] = SkipList() skip_list.insert("""Key1""" , 1_0 ) skip_list.insert("""Key1""" , 1_2 ) skip_list.insert("""Key5""" , 7 ) skip_list.insert("""Key7""" , 1_0 ) skip_list.insert("""Key10""" , 5 ) skip_list.insert("""Key7""" , 7 ) skip_list.insert("""Key5""" , 5 ) skip_list.insert("""Key10""" , 1_0 ) __a : Tuple = skip_list.head __a : Any = {} while node.level != 0: __a : Any = node.forward[0] __a : Tuple = node.value if len(UpperCamelCase__ ) != 4: print() assert len(UpperCamelCase__ ) == 4 assert all_values["Key1"] == 1_2 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 1_0 def __magic_name__ ( ): __a : Any = SkipList() assert skip_list.find("""Some key""" ) is None def __magic_name__ ( ): __a : Any = SkipList() skip_list.insert("""Key2""" , 2_0 ) assert skip_list.find("""Key2""" ) == 2_0 skip_list.insert("""Some Key""" , 1_0 ) skip_list.insert("""Key2""" , 8 ) skip_list.insert("""V""" , 1_3 ) assert skip_list.find("""Y""" ) is None assert skip_list.find("""Key2""" ) == 8 assert skip_list.find("""Some Key""" ) == 1_0 assert skip_list.find("""V""" ) == 1_3 def __magic_name__ ( ): __a : Optional[int] = SkipList() skip_list.delete("""Some key""" ) assert len(skip_list.head.forward ) == 0 def __magic_name__ ( ): __a : List[str] = SkipList() skip_list.insert("""Key1""" , 1_2 ) skip_list.insert("""V""" , 1_3 ) skip_list.insert("""X""" , 1_4 ) skip_list.insert("""Key2""" , 1_5 ) skip_list.delete("""V""" ) skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""Key2""" ) is None def __magic_name__ ( ): __a : List[Any] = SkipList() skip_list.insert("""Key1""" , 1_2 ) skip_list.insert("""V""" , 1_3 ) skip_list.insert("""X""" , 1_4 ) skip_list.insert("""Key2""" , 1_5 ) skip_list.delete("""V""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) == 1_4 assert skip_list.find("""Key1""" ) == 1_2 assert skip_list.find("""Key2""" ) == 1_5 skip_list.delete("""X""" ) assert skip_list.find("""V""" ) is None 
assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) == 1_2 assert skip_list.find("""Key2""" ) == 1_5 skip_list.delete("""Key1""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) == 1_5 skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) is None def __magic_name__ ( ): __a : Optional[Any] = SkipList() skip_list.insert("""Key1""" , 1_2 ) skip_list.insert("""V""" , 1_3 ) skip_list.insert("""X""" , 1_4_2 ) skip_list.insert("""Key2""" , 1_5 ) skip_list.delete("""X""" ) def traverse_keys(_lowerCamelCase : List[Any] ): yield node.key for forward_node in node.forward: yield from traverse_keys(UpperCamelCase__ ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def __magic_name__ ( ): def is_sorted(_lowerCamelCase : str ): return all(next_item >= item for item, next_item in zip(UpperCamelCase__ , lst[1:] ) ) __a : List[str] = SkipList() for i in range(1_0 ): skip_list.insert(UpperCamelCase__ , UpperCamelCase__ ) assert is_sorted(list(UpperCamelCase__ ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(UpperCamelCase__ ) ) skip_list.insert(-1_2 , -1_2 ) skip_list.insert(7_7 , 7_7 ) assert is_sorted(list(UpperCamelCase__ ) ) def __magic_name__ ( ): for _ in range(1_0_0 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def __magic_name__ ( ): __a : Any = SkipList() skip_list.insert(2 , """2""" ) skip_list.insert(4 , """4""" ) skip_list.insert(6 , """4""" ) skip_list.insert(4 , """5""" ) skip_list.insert(8 , """4""" ) skip_list.insert(9 , """4""" ) skip_list.delete(4 ) print(UpperCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
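# A short note on the structure above: each inserted key gets a tower of forward
# pointers whose height is geometric with parameter p, which is what gives the
# skip list its expected O(log n) search. A self-contained sketch of that level
# draw (mirroring random_level above, with p = 0.5 and max_level = 16):
from random import random

def random_level(p=0.5, max_level=16):
    level = 1
    while random() < p and level < max_level:
        level += 1  # keep flipping a biased coin until it comes up tails
    return level

# With p = 0.5 the expected tower height is 1 / (1 - p) = 2.
print(sum(random_level() for _ in range(10_000)) / 10_000)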
581
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Union[str, Any] ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple=True ): model.train() SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple , UpperCamelCase__: List[Any]=False ): set_seed(42 ) SCREAMING_SNAKE_CASE__ = RegressionModel() SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase__ ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) model.to(accelerator.device ) if sched: SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = AdamW(params=ddp_model.parameters() , lr=1e-3 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) SCREAMING_SNAKE_CASE__ = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.6_5 ) # Make a copy of `model` if sched: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ): # Test when on a single CPU or GPU that the context manager does nothing SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[Any] ): # Test on distributed setup that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) # Use a single batch SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) 
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ): SCREAMING_SNAKE_CASE__ = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values() # Gather the distributed inputs and targs for the base model SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n''' SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ )) if accelerator.num_processes > 1: check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = 
RegressionDataset(length=80 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 ) SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if iteration < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if batch_num < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def SCREAMING_SNAKE_CASE__ ( ): SCREAMING_SNAKE_CASE__ = Accelerator() SCREAMING_SNAKE_CASE__ = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(UpperCamelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(UpperCamelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
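Stripped of its assertions, the behaviour this test file checks corresponds to the ordinary user-facing loop: wrap forward and backward in accelerator.accumulate(model) and let Accelerate decide on which iterations gradients synchronize. A minimal sketch using the same regression helpers as the tests:

import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator(gradient_accumulation_steps=2)
model = RegressionModel()
optimizer = AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

model.train()
for batch in dataloader:
    inputs, targets = batch["x"], batch["y"]
    with accelerator.accumulate(model):  # grads only sync every 2nd step here
        loss = F.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()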
6
0
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i  # i is prime here: all smaller factors were divided out
            n //= i
        i += 1
    if n > 1:
        max_number = n  # whatever remains above the square root is itself prime
    return int(max_number)


if __name__ == "__main__":
    print(f"{solution() = }")
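As a quick sanity check of the trial-division loop: 13195 factors as 5 · 7 · 13 · 29, so its largest prime factor is 29, and the default input is the Project Euler problem 3 instance.

# Illustrative sanity checks for solution() above.
assert solution(13195) == 29  # 13195 = 5 * 7 * 13 * 29
assert solution(17) == 17     # a prime is its own largest prime factor
assert solution(600851475143) == 6857  # the Project Euler default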
381
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


# NOTE: the original class and variable names were obfuscated in the dump;
# the descriptive names below are reconstructions.
class ImageTextProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
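In use, a processor of this shape bundles tokenization and image preprocessing behind one call. A hedged sketch follows; the checkpoint name is a placeholder and the class name is the reconstruction used above:

# Hedged usage sketch; "some-org/some-checkpoint" is a placeholder.
from PIL import Image

processor = ImageTextProcessor.from_pretrained("some-org/some-checkpoint")
image = Image.new("RGB", (224, 224))
inputs = processor(text="a caption", images=image, return_tensors="pt")
# inputs now carries input_ids, attention_mask, and pixel_values together.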
6
0
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black __lowerCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. __lowerCamelCase = " \"\"\"\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class _snake_case ( unittest.TestCase ): '''simple docstring''' def snake_case_ ( self : List[Any] ): UpperCAmelCase_ :Any = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) UpperCAmelCase_ :Optional[int] = self.diffusers_dir shutil.copy( os.path.join(__A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def snake_case_ ( self : str ): UpperCAmelCase_ :Optional[int] = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def snake_case_ ( self : List[Any] , snake_case : Optional[int] , snake_case : List[str] , snake_case : Dict , snake_case : int=None ): UpperCAmelCase_ :Dict = comment + f'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: UpperCAmelCase_ :Dict = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result UpperCAmelCase_ :List[str] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) UpperCAmelCase_ :Optional[int] = black.format_str(__A , mode=__A ) UpperCAmelCase_ :List[Any] = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(__A , '''w''' , newline='''\n''' ) as f: f.write(__A ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__A ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=__A ) with open(__A , '''r''' ) as f: self.assertTrue(f.read() , __A ) def snake_case_ ( self : List[Any] ): UpperCAmelCase_ :List[Any] = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(__A , __A ) def snake_case_ ( self : List[str] ): self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , __A , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , __A ) , ) # Copy consistency with a really long name UpperCAmelCase_ :int = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( 
f'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , f'{long_class_name}SchedulerOutput' , re.sub('''Bert''' , __A , __A ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , __A , overwrite_result=re.sub('''DDPM''' , '''Test''' , __A ) , )
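The convention being enforced is the `# Copied from` marker: the checker re-renders the referenced source, applies any `with A->B` renames, and diffs the result against the annotated block. Schematically, with made-up names:

# Schematic of the annotation the checker enforces (names are made up).
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
class TestSchedulerOutput:
    # check_copies.is_copy_consistent() would compare this body against the
    # DDPMSchedulerOutput source with every "DDPM" replaced by "Test".
    pass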
608
def find_min(arr: list[int]) -> int:
    """Partition arr into two subsets whose sums are as close as possible
    and return the minimum possible difference of the two sums."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j.
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take arr[i - 1]
    diff = s
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
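Concretely, for arr = [1, 6, 11, 5] the total is 23 and the best split is {1, 5, 6} against {11}, i.e. 12 versus 11, so the minimum difference is 1:

# Illustrative checks for find_min() above.
assert find_min([1, 6, 11, 5]) == 1       # {1, 5, 6} = 12 vs {11} = 11
assert find_min([3, 1, 4, 2, 2, 1]) == 1  # total 13 -> best split 7 vs 6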
6
0
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self :int ): super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowercase ( self :Any ): __lowerCamelCase : Union[str, Any] =1 __lowerCamelCase : int =3 __lowerCamelCase : Dict =(32, 32) __lowerCamelCase : Dict =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__A ) return image @property def __lowercase ( self :str ): torch.manual_seed(0 ) __lowerCamelCase : Optional[Any] =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def __lowercase ( self :List[Any] ): torch.manual_seed(0 ) __lowerCamelCase : Optional[Any] =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def __lowercase ( self :Union[str, Any] ): torch.manual_seed(0 ) __lowerCamelCase : Tuple =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(__A ) @property def __lowercase ( self :Any ): def extract(*__lowercase :Tuple , **__lowercase :str ): class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self :Union[str, Any] ): __lowerCamelCase : int =torch.ones([0] ) def __lowercase ( self :Dict , __lowercase :Tuple ): self.pixel_values.to(__A ) return self return Out() return extract def __lowercase ( self :Optional[int] ): __lowerCamelCase : str ='''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Any =self.dummy_cond_unet __lowerCamelCase : List[Any] =DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__A , set_alpha_to_one=__A , ) __lowerCamelCase : Dict =self.dummy_vae __lowerCamelCase : Any =self.dummy_text_encoder __lowerCamelCase : Union[str, Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk __lowerCamelCase : Dict =StableDiffusionPipeline( unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , ) __lowerCamelCase : Optional[int] =sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) __lowerCamelCase : List[str] ='''A painting of a squirrel eating a burger''' __lowerCamelCase : int =torch.Generator(device=__A ).manual_seed(0 ) __lowerCamelCase : str =sd_pipe([prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) __lowerCamelCase : str =output.images 
__lowerCamelCase : Any =torch.Generator(device=__A ).manual_seed(0 ) __lowerCamelCase : str =sd_pipe( [prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__A , )[0] __lowerCamelCase : List[Any] =image[0, -3:, -3:, -1] __lowerCamelCase : Union[str, Any] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase : List[str] =np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase ( self :int ): __lowerCamelCase : Dict ='''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Optional[int] =self.dummy_cond_unet __lowerCamelCase : List[Any] =PNDMScheduler(skip_prk_steps=__A ) __lowerCamelCase : Union[str, Any] =self.dummy_vae __lowerCamelCase : Any =self.dummy_text_encoder __lowerCamelCase : Any =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk __lowerCamelCase : int =StableDiffusionPipeline( unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , ) __lowerCamelCase : List[str] =sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) __lowerCamelCase : List[Any] ='''A painting of a squirrel eating a burger''' __lowerCamelCase : Tuple =torch.Generator(device=__A ).manual_seed(0 ) __lowerCamelCase : List[str] =sd_pipe([prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) __lowerCamelCase : int =output.images __lowerCamelCase : Optional[int] =torch.Generator(device=__A ).manual_seed(0 ) __lowerCamelCase : Any =sd_pipe( [prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__A , )[0] __lowerCamelCase : List[Any] =image[0, -3:, -3:, -1] __lowerCamelCase : Optional[Any] =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase : str =np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase ( self :Tuple ): __lowerCamelCase : Tuple =StableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=__A ) assert isinstance(__A , __A ) assert isinstance(pipe.scheduler , __A ) assert pipe.safety_checker is None __lowerCamelCase : List[Any] =pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__A ) __lowerCamelCase : Dict =StableDiffusionPipeline.from_pretrained(__A ) # sanity check that the pipeline still works assert pipe.safety_checker is None __lowerCamelCase : Any =pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def __lowercase ( self :Any ): __lowerCamelCase : Optional[int] =self.dummy_cond_unet __lowerCamelCase : Union[str, Any] =PNDMScheduler(skip_prk_steps=__A ) __lowerCamelCase : Union[str, Any] =self.dummy_vae __lowerCamelCase : Optional[int] =self.dummy_text_encoder 
__lowerCamelCase : Any =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # put models in fp16 __lowerCamelCase : List[Any] =unet.half() __lowerCamelCase : int =vae.half() __lowerCamelCase : Tuple =bert.half() # make sure here that pndm scheduler skips prk __lowerCamelCase : Tuple =StableDiffusionPipeline( unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , ) __lowerCamelCase : Optional[int] =sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) __lowerCamelCase : Optional[Any] ='''A painting of a squirrel eating a burger''' __lowerCamelCase : Union[str, Any] =sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self :int ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self :Dict ): __lowerCamelCase : str =StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__A ) __lowerCamelCase : Optional[int] =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __lowerCamelCase : List[str] =sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) __lowerCamelCase : Optional[Any] =( '''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle''' ''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with''' ''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and''' ''' children from bahnhof zoo, detailed ''' ) __lowerCamelCase : Optional[int] =40_0366_0346 __lowerCamelCase : List[Any] =7 # without safety guidance (sld_guidance_scale = 0) __lowerCamelCase : List[Any] =torch.manual_seed(__A ) __lowerCamelCase : List[Any] =sd_pipe( [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) __lowerCamelCase : int =output.images __lowerCamelCase : List[Any] =image[0, -3:, -3:, -1] __lowerCamelCase : Tuple =[0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) __lowerCamelCase : Optional[int] =torch.manual_seed(__A ) __lowerCamelCase : Optional[Any] =sd_pipe( [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __lowerCamelCase : str =output.images __lowerCamelCase : Dict =image[0, -3:, -3:, -1] __lowerCamelCase : List[str] =[0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase ( self :Any ): __lowerCamelCase : List[str] =StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__A ) __lowerCamelCase : Optional[int] =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __lowerCamelCase : Dict =sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) __lowerCamelCase : List[Any] ='''padme amidala taking a bath artwork, safe for work, no nudity''' __lowerCamelCase : Optional[Any] 
=27_3497_1755 __lowerCamelCase : Optional[int] =7 __lowerCamelCase : List[str] =torch.manual_seed(__A ) __lowerCamelCase : Union[str, Any] =sd_pipe( [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) __lowerCamelCase : str =output.images __lowerCamelCase : Optional[Any] =image[0, -3:, -3:, -1] __lowerCamelCase : Dict =[0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 __lowerCamelCase : Dict =torch.manual_seed(__A ) __lowerCamelCase : Optional[Any] =sd_pipe( [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __lowerCamelCase : Optional[int] =output.images __lowerCamelCase : Union[str, Any] =image[0, -3:, -3:, -1] __lowerCamelCase : Any =[0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowercase ( self :Dict ): __lowerCamelCase : Optional[Any] =StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ) __lowerCamelCase : int =sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) __lowerCamelCase : int =( '''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.''' ''' leyendecker''' ) __lowerCamelCase : Dict =10_4435_5234 __lowerCamelCase : Any =12 __lowerCamelCase : Tuple =torch.manual_seed(__A ) __lowerCamelCase : Optional[int] =sd_pipe( [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) __lowerCamelCase : Optional[int] =output.images __lowerCamelCase : Optional[int] =image[0, -3:, -3:, -1] __lowerCamelCase : Dict =np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 __lowerCamelCase : List[str] =torch.manual_seed(__A ) __lowerCamelCase : Any =sd_pipe( [prompt] , generator=__A , guidance_scale=__A , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __lowerCamelCase : int =output.images __lowerCamelCase : Optional[int] =image[0, -3:, -3:, -1] __lowerCamelCase : Tuple =np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
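Outside the test harness, the safe-latent-diffusion knobs exercised above are passed straight to the pipeline call. A hedged sketch (the prompt is a placeholder and a CUDA device is assumed):

import torch
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = pipe.to("cuda")
generator = torch.manual_seed(0)
image = pipe(
    "a prompt of your choice",  # placeholder prompt
    generator=generator,
    num_inference_steps=50,
    sld_guidance_scale=2000,  # 0 would disable the safety guidance entirely
    sld_warmup_steps=7,
    sld_threshold=0.025,
    sld_momentum_scale=0.5,
    sld_mom_beta=0.7,
).images[0]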
179
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the translational kinetic energy E = 1/2 * m * v^2 in joules."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
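Numerically, a 10 kg body moving at 10 m/s carries 0.5 · 10 · 10² = 500 J, and the sign of the velocity is irrelevant because speed enters squared:

# Illustrative checks for kinetic_energy() above.
assert kinetic_energy(10, 10) == 500.0   # 0.5 * 10 * 100
assert kinetic_energy(10, -10) == 500.0  # abs() makes the sign irrelevant
assert kinetic_energy(0, 10) == 0.0      # a massless body carries no energy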
6
0
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
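Tracing a small case: 25 is 11001 and 32 is 100000 in binary; zero-filling to six digits and OR-ing column by column yields 111001, which is 57:

# Illustrative checks for binary_or() above.
assert binary_or(25, 32) == "0b111001"       # 011001 | 100000
assert binary_or(0, 0) == "0b0"
assert int(binary_or(25, 32), 2) == 25 | 32  # agrees with the builtin operator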
428
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
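The classmethod above is the intended entry point for composing two pretrained configs; a short usage sketch against the public transformers API:

# Hedged usage sketch for EncoderDecoderConfig.
from transformers import AutoConfig, EncoderDecoderConfig

encoder_config = AutoConfig.from_pretrained("bert-base-uncased")
decoder_config = AutoConfig.from_pretrained("bert-base-uncased")
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.is_encoder_decoder
assert config.decoder.is_decoder and config.decoder.add_cross_attention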
6
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ """FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, 
TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
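The _import_structure dictionary plus _LazyModule keeps importing this package cheap: a heavy submodule is only loaded when one of its names is first accessed. A minimal sketch of the idea (simplified relative to the real transformers.utils._LazyModule; the names here are illustrative):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import only happens once
        return value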
229
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
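Assuming this mirrors the datasets library's text-classification task template, aligning it with a concrete schema specializes the generic ClassLabel to the dataset's own labels:

# Hedged usage sketch for the task template above.
from datasets import ClassLabel, Features, Value

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
aligned = task.align_with_features(features)
assert aligned.label_schema["labels"].names == ["neg", "pos"]
assert task.column_mapping == {"text": "text", "labels": "labels"}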
6
0
'''simple docstring''' import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed __lowerCAmelCase ={ "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def a ( _UpperCAmelCase ) -> Optional[Any]: """simple docstring""" assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def a ( _UpperCAmelCase , _UpperCAmelCase ) -> str: """simple docstring""" if args.student_type == "roberta": a_ = False elif args.student_type == "gpt2": a_ = False def a ( _UpperCAmelCase , _UpperCAmelCase ) -> Any: """simple docstring""" if args.student_type == "roberta": a_ = False def a ( ) -> Tuple: """simple docstring""" a_ = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=UpperCamelCase__ , choices=['distilbert', 'roberta', 'gpt2'] , required=UpperCamelCase__ , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=UpperCamelCase__ , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=UpperCamelCase__ , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=UpperCamelCase__ , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=UpperCamelCase__ , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=UpperCamelCase__ , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=UpperCamelCase__ , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=UpperCamelCase__ , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.1_5 , type=UpperCamelCase__ , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=UpperCamelCase__ , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=UpperCamelCase__ , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=UpperCamelCase__ , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=UpperCamelCase__ , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=UpperCamelCase__ , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=UpperCamelCase__ , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=UpperCamelCase__ , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=UpperCamelCase__ , default=5_0 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.0_5 , type=UpperCamelCase__ , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=UpperCamelCase__ , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=UpperCamelCase__ , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=UpperCamelCase__ , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=UpperCamelCase__ , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.0_2 , type=UpperCamelCase__ , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=UpperCamelCase__ , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=UpperCamelCase__ , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=UpperCamelCase__ , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=UpperCamelCase__ , default=5_6 , help='Random seed' ) parser.add_argument('--log_interval' , type=UpperCamelCase__ , default=5_0_0 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=UpperCamelCase__ , default=4_0_0_0 , help='Checkpoint interval.' ) a_ = parser.parse_args() sanity_checks(UpperCamelCase__ ) # ARGS # init_gpu_params(UpperCamelCase__ ) set_seed(UpperCamelCase__ ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(UpperCamelCase__ ) , UpperCamelCase__ , indent=4 ) git_log(args.dump_path ) a_ , a_ , a_ = MODEL_CLASSES[args.student_type] a_ , a_ , a_ = MODEL_CLASSES[args.teacher_type] # TOKENIZER # a_ = teacher_tokenizer_class.from_pretrained(args.teacher_name ) a_ = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): a_ = tokenizer.all_special_tokens.index(UpperCamelCase__ ) a_ = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) a_ = special_tok_ids a_ = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: a_ = pickle.load(UpperCamelCase__ ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: a_ = pickle.load(UpperCamelCase__ ) a_ = np.maximum(UpperCamelCase__ , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): a_ = 0.0 # do not predict special tokens a_ = torch.from_numpy(UpperCamelCase__ ) else: a_ = None a_ = LmSeqsDataset(params=UpperCamelCase__ , data=UpperCamelCase__ ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) a_ = student_config_class.from_pretrained(args.student_config ) a_ = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) a_ = student_model_class.from_pretrained(args.student_pretrained_weights , config=UpperCamelCase__ ) else: a_ = student_model_class(UpperCamelCase__ ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' 
) # TEACHER # a_ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=UpperCamelCase__ ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(UpperCamelCase__ , UpperCamelCase__ ) if args.freeze_token_type_embds: freeze_token_type_embeddings(UpperCamelCase__ , UpperCamelCase__ ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() a_ = Distiller( params=UpperCamelCase__ , dataset=UpperCamelCase__ , token_probs=UpperCamelCase__ , student=UpperCamelCase__ , teacher=UpperCamelCase__ ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
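Putting the parser and its sanity checks together, a launch consistent with them could look like the sketch below; the script name and every path are placeholders, and `--mlm` requires the MLM loss weight to be positive while the CLM weight must be zero:

# Hypothetical launch mirroring the argparse flags and sanity checks above;
# every path is a placeholder, and the pickles must exist beforehand.
import subprocess

subprocess.run(
    [
        "python", "train.py",  # assumed script name for this module
        "--force",
        "--dump_path", "serialization_dir/my_distillation",
        "--data_file", "data/binarized_text.pickle",
        "--token_counts", "data/token_counts.pickle",  # required with --mlm
        "--student_type", "distilbert",
        "--student_config", "training_configs/distilbert-base-uncased.json",
        "--teacher_type", "bert",
        "--teacher_name", "bert-base-uncased",
        "--mlm",
        "--alpha_mlm", "0.5",
        "--alpha_clm", "0.0",  # the sanity checks demand CLM off when MLM is on
    ],
    check=True,
)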
697
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ): SCREAMING_SNAKE_CASE__ = model.config SCREAMING_SNAKE_CASE__ = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) SCREAMING_SNAKE_CASE__ = MBartConfig( is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , ) return encoder_config, decoder_config def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ): if "encoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" ) if "decoder.model" in name: SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" ) if "patch_embed.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if name.startswith("""encoder""" ): if "layers" in name: SCREAMING_SNAKE_CASE__ = """encoder.""" + name if "attn.proj" in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "mask" not in name: SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "encoder.norm.weight": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight""" if name == "encoder.norm.bias": SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias""" return name def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ): for key in orig_state_dict.copy().keys(): SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ ) if "qkv" in key: SCREAMING_SNAKE_CASE__ = key.split(""".""" ) SCREAMING_SNAKE_CASE__ = int(key_split[3] ) SCREAMING_SNAKE_CASE__ = int(key_split[5] ) SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: SCREAMING_SNAKE_CASE__ = val[:dim, :] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :] SCREAMING_SNAKE_CASE__ = val[-dim:, :] else: SCREAMING_SNAKE_CASE__ = val[:dim] SCREAMING_SNAKE_CASE__ = val[dim : dim * 2] SCREAMING_SNAKE_CASE__ = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: SCREAMING_SNAKE_CASE__ = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: 
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "<s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")

    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' return [sentence[i : i + ngram_size] for i in range(len(UpperCamelCase__ ) - ngram_size + 1 )] if __name__ == "__main__": from doctest import testmod testmod()
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
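# Outside the test harness, the same pipeline is driven like this (a sketch:
# the model id, scheduler name, and karras flag all come from the tests above;
# running it requires a CUDA device).
import torch

from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
pipe = pipe.to("cuda")
pipe.set_scheduler("sample_dpmpp_2m")
image = pipe(
    "A painting of a squirrel eating a burger",
    num_inference_steps=15,
    guidance_scale=7.5,
    use_karras_sigmas=True,
).images[0]
image.save("squirrel.png")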
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
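# Illustrative inference with a converted checkpoint. The Hub id is the
# published distilled DeiT-base model and is an assumption here, since this
# script only saves locally.
import requests
import torch
from PIL import Image

from transformers import DeiTForImageClassificationWithTeacher, DeiTImageProcessor

processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])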
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
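# Sanity checks: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29,
# and the published answer for the default input 600_851_475_143 is 6857.
assert solution(13195) == 29
assert solution(17) == 17  # a prime is its own largest prime factor
assert solution() == 6857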
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """
    Calculate the present value of a stream of yearly cash flows
    discounted at a constant rate, rounded to two decimal places.
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
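# Worked example: two $100 cash flows, one today (i = 0) and one in a year
# (i = 1), at a 5% discount rate: 100 / 1.05**0 + 100 / 1.05**1 = 195.238...
print(present_value(0.05, [100.0, 100.0]))     # 195.24
print(present_value(0.0, [10.0, 20.0, 30.0]))  # 60.0 - no discounting at rate 0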
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_processor, tf_input_processor))
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Find out what day of the week nearly any date is or was (Zeller's congruence)."""
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths vars
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument("date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)")
    args = parser.parse_args()
    zeller(args.date_input)
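# Example outputs (both weekdays checked against the calendar):
print(zeller("01-31-2010"))  # Your date 01-31-2010, is a Sunday!
print(zeller("03/14/2015"))  # Your date 03/14/2015, is a Saturday!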
"""simple docstring""" import sys import turtle def __magic_name__ ( _lowerCamelCase : tuple[float, float] , _lowerCamelCase : tuple[float, float] ): return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def __magic_name__ ( _lowerCamelCase : tuple[float, float] , _lowerCamelCase : tuple[float, float] , _lowerCamelCase : tuple[float, float] , _lowerCamelCase : int , ): my_pen.up() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) if depth == 0: return triangle(UpperCamelCase__ , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , depth - 1 ) triangle(UpperCamelCase__ , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , depth - 1 ) triangle(UpperCamelCase__ , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , get_mid(UpperCamelCase__ , UpperCamelCase__ ) , depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( "Correct format for using this script: " "python fractals.py <int:depth_for_fractal>" ) lowercase__ = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("red") lowercase__ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
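# A minimal sketch of how such counts are typically consumed downstream: the
# distillation trainer smooths them into per-token masking probabilities,
# XLM-style. The 0.7 exponent is an assumption here (it mirrors the trainer's
# smoothing default rather than anything this script defines).
import pickle

import numpy as np

with open("data/token_counts.bert-base-uncased.pickle", "rb") as fp:
    counts = pickle.load(fp)

token_probs = np.maximum(counts, 1) ** -0.7  # rarer tokens get masked more often
token_probs = token_probs / token_probs.sum()  # normalize into a distribution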
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
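# The lazy-module pattern above means the heavy submodules are only imported on
# first attribute access; a downstream user just writes the usual import. The
# checkpoint ids below are illustrative, not defined in this file.
from transformers import SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "bert-base-uncased"
)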
"""simple docstring""" class _snake_case : '''simple docstring''' def __init__( self : int , snake_case : list ): UpperCAmelCase_ :Tuple = set_counts UpperCAmelCase_ :Dict = max(__A ) UpperCAmelCase_ :Union[str, Any] = len(__A ) UpperCAmelCase_ :Union[str, Any] = [1] * num_sets UpperCAmelCase_ :Optional[Any] = list(range(__A ) ) def snake_case_ ( self : Union[str, Any] , snake_case : int , snake_case : int ): UpperCAmelCase_ :Any = self.get_parent(__A ) UpperCAmelCase_ :int = self.get_parent(__A ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] UpperCAmelCase_ :int = 0 UpperCAmelCase_ :Optional[Any] = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 UpperCAmelCase_ :int = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] UpperCAmelCase_ :Optional[int] = 0 UpperCAmelCase_ :Optional[Any] = src_parent UpperCAmelCase_ :Optional[int] = self.set_counts[src_parent] UpperCAmelCase_ :Tuple = max(self.max_set , __A ) return True def snake_case_ ( self : Union[str, Any] , snake_case : int ): if self.parents[disj_set] == disj_set: return disj_set UpperCAmelCase_ :List[str] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
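# Minimal end-to-end sketch of the processor in use. The checkpoint id is the
# published OWL-ViT one and is an assumption here; this file only defines the
# processor, not any weights.
import requests
import torch
from PIL import Image

from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)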
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging _UpperCamelCase = logging.get_logger(__name__) def lowerCAmelCase_ ( ): '''simple docstring''' __lowerCamelCase : List[str] =os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. __lowerCamelCase : Tuple =json.loads(UpperCamelCase__ ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. __lowerCamelCase : int =os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". __lowerCamelCase : Optional[Any] =json.loads(UpperCamelCase__ ) if not mpi_options.get('''sagemaker_mpi_enabled''' , UpperCamelCase__ ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('''smdistributed''' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ ): """simple docstring""" __snake_case : List[str] = field( default="""""" , metadata={"""help""": """Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"""} , ) def __lowercase ( self :Any ): super().__post_init__() warnings.warn( '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ''' '''`TrainingArguments` instead.''' , __A , ) @cached_property def __lowercase ( self :List[Any] ): logger.info('''PyTorch: setting up devices''' ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( '''torch.distributed process group is initialized, but local_rank == -1. ''' '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' ) if self.no_cuda: __lowerCamelCase : str =torch.device('''cpu''' ) __lowerCamelCase : Any =0 elif is_sagemaker_model_parallel_available(): __lowerCamelCase : List[str] =smp.local_rank() __lowerCamelCase : List[Any] =torch.device('''cuda''' , __A ) __lowerCamelCase : List[Any] =1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta ) __lowerCamelCase : Any =int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) ) __lowerCamelCase : Optional[int] =torch.device('''cuda''' , self.local_rank ) __lowerCamelCase : List[Any] =1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 __lowerCamelCase : Dict =torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. 
__lowerCamelCase : int =torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta ) __lowerCamelCase : Any =torch.device('''cuda''' , self.local_rank ) __lowerCamelCase : Optional[int] =1 if device.type == "cuda": torch.cuda.set_device(__A ) return device @property def __lowercase ( self :Tuple ): if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def __lowercase ( self :int ): return not is_sagemaker_model_parallel_available() @property def __lowercase ( self :Any ): return False
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
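# Shape sanity check on a randomly initialized VQModel with the default config
# above (a sketch: with a single 64-channel block the spatial size is preserved
# and the latent has latent_channels = 3 channels).
import torch

from diffusers import VQModel

vq = VQModel()
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    latents = vq.encode(x).latents
    reconstruction = vq(x).sample
print(latents.shape)         # torch.Size([1, 3, 32, 32])
print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])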
import itertools
import random
import unittest

import numpy as np

from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
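# For context, a standalone sketch (not from the test file above) of the
# utterance-level normalization property that _check_zero_mean_unit_variance asserts.
import numpy as np

feats = np.random.randn(584, 24)                                  # fake fbank features
feats = (feats - feats.mean(axis=0)) / np.sqrt(feats.var(axis=0) + 1e-7)
assert np.all(np.abs(feats.mean(axis=0)) < 1e-3)                  # ~zero mean per bin
assert np.all(np.abs(feats.var(axis=0) - 1) < 1e-3)               # ~unit variance per bin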
428
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # 5-way classifier on the pooled output, in addition to start/end logits
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # apply weight decay to everything except biases and LayerNorm scales
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
6
0
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
229
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
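# Illustrative usage of the helper above (not part of the original file):
# resolve an activation module from a config string and apply it.
import torch

act = get_activation("silu")   # returns nn.SiLU()
out = act(torch.randn(2, 8))   # applied elementwise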
6
0
import argparse
import os
import pickle
import sys

import torch

from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
697
import argparse
import json
import pickle
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config


def create_rename_keys(config):
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias"))

    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias"))
    for source_index, target_index in zip(range(3, 0, -1), range(0, 3)):
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias"))
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight"))
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias"))

    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers):
        # self-attention out projection
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias"))
        # cross-attention out projection
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias"))
        # MLP 1
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias"))
        # MLP 2
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias"))
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias"))
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias"))
        # layernorm 3 (final layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias"))

    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias"))

    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias"))
    for i in range(3):
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight"))
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias"))
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on


def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pkl file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
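# Both read_in_*_q_k_v helpers above rely on the same trick: a fused in-projection
# matrix stores query, key, and value weights stacked row-wise, so they can be
# recovered as equal thirds. A toy standalone illustration (shapes are made up):
import torch

dim = 96
in_proj_weight = torch.randn(3 * dim, dim)   # stacked [q; k; v] rows
q_w = in_proj_weight[:dim, :]
k_w = in_proj_weight[dim : dim * 2, :]
v_w = in_proj_weight[-dim:, :]
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)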
6
0