Dataset schema:

    code                     string  (lengths 81 - 54k)
    code_codestyle           int64   (0 - 721)
    style_context            string  (lengths 91 - 41.9k)
    style_context_codestyle  int64   (0 - 699)
    label                    int64   (0 - 1)
"""simple docstring""" import os def _UpperCAmelCase ( lowerCamelCase__ = "input.txt" ): """simple docstring""" with open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) as input_file: lowerCAmelCase__ = [ [int(lowerCamelCase__ ) for element in line.split(""",""" )] for line in input_file.readlines() ] lowerCAmelCase__ = len(lowerCamelCase__ ) lowerCAmelCase__ = len(matrix[0] ) lowerCAmelCase__ = [[-1 for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )] for i in range(lowerCamelCase__ ): lowerCAmelCase__ = matrix[i][0] for j in range(1 , lowerCamelCase__ ): for i in range(lowerCamelCase__ ): lowerCAmelCase__ = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , lowerCamelCase__ ): lowerCAmelCase__ = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): lowerCAmelCase__ = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F"{solution() = }")
code_codestyle: 674
"""simple docstring""" import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = MobileBertConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ = MobileBertForPreTraining(lowerCamelCase__ ) # Load weights from tf checkpoint lowerCAmelCase__ = load_tf_weights_in_mobilebert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : Optional[int] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
style_context_codestyle: 674
label: 1
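The path-sum solver in the row above relaxes each column three times (carry-in from the left, then top-down, then bottom-up), which is what lets a path move vertically within a column. A minimal sketch of the same relaxation on an in-memory matrix; min_path_sum is a hypothetical helper added here for illustration, not part of the row's code:

def min_path_sum(matrix: list[list[int]]) -> int:
    # Same three-pass column relaxation as solution(), without the file I/O.
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]  # cheapest path ending in column 0
    for j in range(1, cols):
        best = [best[i] + matrix[i][j] for i in range(rows)]  # step right
        for i in range(1, rows):  # then allow downward moves
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # then allow upward moves
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)


# Cheapest route is 5 -> 1 along the bottom row.
assert min_path_sum([[1, 9], [5, 1]]) == 6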
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ = 400_0000 ): """simple docstring""" lowerCAmelCase__ = [] lowerCAmelCase__ , lowerCAmelCase__ = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(lowerCamelCase__ ) lowerCAmelCase__ , lowerCAmelCase__ = b, a + b return sum(lowerCamelCase__ ) if __name__ == "__main__": print(F"{solution() = }")
code_codestyle: 674
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: lowerCAmelCase__ = f"""The input value of [n={number}] has to be > 0""" raise ValueError(lowerCamelCase__ ) else: lowerCAmelCase__ = sylvester(number - 1 ) lowerCAmelCase__ = num - 1 lowerCAmelCase__ = num return lower * upper + 1 if __name__ == "__main__": print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
style_context_codestyle: 674
label: 1
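The sylvester recursion above computes each term as (s(n-1) - 1) * s(n-1) + 1, which is the closed recurrence s(n) = s(n-1)**2 - s(n-1) + 1. A quick hedged cross-check against that recurrence, assuming the sylvester function as reconstructed above:

# Unroll the closed recurrence from s(1) = 2 and compare with sylvester(7).
expected = 2
for _ in range(6):
    expected = expected * expected - expected + 1
assert sylvester(7) == expected  # sequence: 2, 3, 7, 43, 1807, 3263443, ...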
"""simple docstring""" import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return x + 2 class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = """x = 3""" lowerCAmelCase__ = {} lowerCAmelCase__ = evaluate(snake_case__ , {} , state=snake_case__ ) assert result == 3 self.assertDictEqual(snake_case__ , {"""x""": 3} ) lowerCAmelCase__ = """x = y""" lowerCAmelCase__ = {"""y""": 5} lowerCAmelCase__ = evaluate(snake_case__ , {} , state=snake_case__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(snake_case__ , {"""x""": 5, """y""": 5} ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = """y = add_two(x)""" lowerCAmelCase__ = {"""x""": 3} lowerCAmelCase__ = evaluate(snake_case__ , {"""add_two""": add_two} , state=snake_case__ ) assert result == 5 self.assertDictEqual(snake_case__ , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: lowerCAmelCase__ = evaluate(snake_case__ , {} , state=snake_case__ ) assert result is None assert "tried to execute add_two" in out.out def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = """x = 3""" lowerCAmelCase__ = {} lowerCAmelCase__ = evaluate(snake_case__ , {} , state=snake_case__ ) assert result == 3 self.assertDictEqual(snake_case__ , {"""x""": 3} ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = """test_dict = {'x': x, 'y': add_two(x)}""" lowerCAmelCase__ = {"""x""": 3} lowerCAmelCase__ = evaluate(snake_case__ , {"""add_two""": add_two} , state=snake_case__ ) self.assertDictEqual(snake_case__ , {"""x""": 3, """y""": 5} ) self.assertDictEqual(snake_case__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = """x = 3\ny = 5""" lowerCAmelCase__ = {} lowerCAmelCase__ = evaluate(snake_case__ , {} , state=snake_case__ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(snake_case__ , {"""x""": 3, """y""": 5} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = """text = f'This is x: {x}.'""" lowerCAmelCase__ = {"""x""": 3} lowerCAmelCase__ = evaluate(snake_case__ , {} , state=snake_case__ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(snake_case__ , {"""x""": 3, """text""": """This is x: 3."""} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = """if x <= 3:\n y = 2\nelse:\n y = 5""" lowerCAmelCase__ = {"""x""": 3} lowerCAmelCase__ = evaluate(snake_case__ , {} , state=snake_case__ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(snake_case__ , {"""x""": 3, """y""": 2} ) lowerCAmelCase__ = {"""x""": 8} lowerCAmelCase__ = evaluate(snake_case__ , {} , state=snake_case__ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(snake_case__ , {"""x""": 8, """y""": 5} ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = """test_list = [x, add_two(x)]""" lowerCAmelCase__ = {"""x""": 3} lowerCAmelCase__ = evaluate(snake_case__ , {"""add_two""": add_two} , state=snake_case__ ) self.assertListEqual(snake_case__ , [3, 5] ) self.assertDictEqual(snake_case__ , {"""x""": 3, """test_list""": [3, 5]} ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = """y = x""" lowerCAmelCase__ = {"""x""": 3} lowerCAmelCase__ = evaluate(snake_case__ , {} , state=snake_case__ ) assert result == 3 self.assertDictEqual(snake_case__ , {"""x""": 3, """y""": 3} ) def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = """test_list = [x, add_two(x)]\ntest_list[1]""" lowerCAmelCase__ = {"""x""": 3} lowerCAmelCase__ = evaluate(snake_case__ , {"""add_two""": add_two} , state=snake_case__ ) assert result == 5 self.assertDictEqual(snake_case__ , {"""x""": 3, """test_list""": [3, 5]} ) lowerCAmelCase__ = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" lowerCAmelCase__ = {"""x""": 3} lowerCAmelCase__ = evaluate(snake_case__ , {"""add_two""": add_two} , state=snake_case__ ) assert result == 5 self.assertDictEqual(snake_case__ , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = """x = 0\nfor i in range(3):\n x = i""" lowerCAmelCase__ = {} lowerCAmelCase__ = evaluate(snake_case__ , {"""range""": range} , state=snake_case__ ) assert result == 2 self.assertDictEqual(snake_case__ , {"""x""": 2, """i""": 2} )
code_codestyle: 674
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Tuple = PegasusTokenizer UpperCamelCase_ : Any = PegasusTokenizerFast UpperCamelCase_ : int = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return PegasusTokenizer.from_pretrained("""google/pegasus-large""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = """</s>""" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """</s>""" ) self.assertEqual(vocab_keys[-1] , """v""" ) self.assertEqual(len(snake_case__ ) , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ = 
"""To ensure a smooth flow of bank resolutions.""" lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : str ): # fmt: off lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = PegasusTokenizer UpperCamelCase_ : Optional[int] = PegasusTokenizerFast UpperCamelCase_ : Union[str, Any] = True UpperCamelCase_ : Optional[int] = True def _SCREAMING_SNAKE_CASE ( 
self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ): return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids self.assertListEqual( snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
style_context_codestyle: 674
label: 1
"""simple docstring""" from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
code_codestyle: 674
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : int ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , ) assert hasattr(self , """env""" ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ): # configuration for running training on smdistributed Model Parallel lowerCAmelCase__ = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase__ = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ): TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ): # create estimator lowerCAmelCase__ = self.create_estimator(snake_case__ ) # run training estimator.fit() # result dataframe lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] 
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
style_context_codestyle: 674
label: 1
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __lowerCAmelCase : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: __lowerCAmelCase : Optional[int] = json.load(f) @require_torch class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict ): return FSMTTokenizer.from_pretrained(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ): lowerCAmelCase__ = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Any , snake_case__ : int ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowerCAmelCase__ = F"""facebook/wmt19-{pair}""" lowerCAmelCase__ = self.get_tokenizer(snake_case__ ) lowerCAmelCase__ = self.get_model(snake_case__ ) lowerCAmelCase__ = bleu_data[pair]["""src"""] lowerCAmelCase__ = bleu_data[pair]["""tgt"""] lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""pt""" , truncation=snake_case__ , padding="""longest""" ).to(snake_case__ ) lowerCAmelCase__ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowerCAmelCase__ = tokenizer.batch_decode( snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) lowerCAmelCase__ = calculate_bleu(snake_case__ , snake_case__ ) print(snake_case__ ) self.assertGreaterEqual(scores["""bleu"""] , snake_case__ )
code_codestyle: 674
"""simple docstring""" from math import pi, sqrt def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" if num <= 0: raise ValueError("""math domain error""" ) if num > 1_71.5: raise OverflowError("""math range error""" ) elif num - int(lowerCamelCase__ ) not in (0, 0.5): raise NotImplementedError("""num must be an integer or a half-integer""" ) elif num == 0.5: return sqrt(lowerCamelCase__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _UpperCAmelCase ( ): """simple docstring""" assert gamma(0.5 ) == sqrt(lowerCamelCase__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() __lowerCAmelCase : Dict = 1.0 while num: __lowerCAmelCase : Any = float(input("Gamma of: ")) print(F"gamma({num}) = {gamma(num)}") print("\nEnter 0 to exit...")
style_context_codestyle: 674
label: 1
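The recursive gamma above bottoms out at gamma(1) = 1 and gamma(0.5) = sqrt(pi), and otherwise applies gamma(num) = (num - 1) * gamma(num - 1). Worked by hand under those same definitions, assuming the gamma function as reconstructed above:

from math import isclose, pi, sqrt

# gamma(2.5) unrolls to 1.5 * 0.5 * gamma(0.5) = 0.75 * sqrt(pi)
assert isclose(gamma(2.5), 0.75 * sqrt(pi))
# gamma(5) unrolls to 4 * 3 * 2 * 1 = 4! = 24
assert gamma(5) == 24.0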
"""simple docstring""" from __future__ import annotations __lowerCAmelCase : List[Any] = 8.9_8_8e9 # units = N * m^s * C^-2 def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if distance < 0: raise ValueError("""Distance cannot be negative""" ) if force == 0: lowerCAmelCase__ = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: lowerCAmelCase__ = abs(lowerCamelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: lowerCAmelCase__ = abs(lowerCamelCase__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: lowerCAmelCase__ = (COULOMBS_CONSTANT * charge_product / abs(lowerCamelCase__ )) ** 0.5 return {"distance": distance} raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 674
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a_ : def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ): lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = patch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = is_training lowerCAmelCase__ = use_labels lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = scope lowerCAmelCase__ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCAmelCase__ = (image_size // patch_size) ** 2 lowerCAmelCase__ = num_patches + 2 def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[Any] ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ): lowerCAmelCase__ = TFDeiTModel(config=snake_case__ ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple 
, snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ): lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ ) lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ): lowerCAmelCase__ = self.type_sequence_label_size lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ ) lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ ) lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Optional[Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) UpperCamelCase_ : Any = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Optional[int] = False UpperCamelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = TFDeiTModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): pass def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCAmelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] 
lowerCAmelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ): lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _SCREAMING_SNAKE_CASE ( self : Any ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Any ): return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" ) # forward pass lowerCAmelCase__ = model(**snake_case__ ) # verify the logits lowerCAmelCase__ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
style_context_codestyle: 674
label: 1
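Since the Coulomb's-law solver in the row above returns whichever quantity was passed as 0, a short usage sketch; the keyword names follow the deobfuscated reconstruction of coulombs_law above:

# Force between two 1 C charges 1 m apart: F = k * 1 * 1 / 1**2 = 8.988e9 N
print(coulombs_law(force=0, charge1=1, charge2=1, distance=1))  # {'force': 8988000000.0}

# Distance at which those two charges exert exactly k newtons on each other
print(coulombs_law(force=8.988e9, charge1=1, charge2=1, distance=0))  # {'distance': 1.0}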
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a_ : def __init__( self : Optional[int] , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=64 , snake_case__ : Any=None ): lowerCAmelCase__ = np.random.default_rng(snake_case__ ) lowerCAmelCase__ = length lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Optional[Any] ): return self.length def __getitem__( self : List[str] , snake_case__ : Optional[int] ): return {"x": self.x[i], "y": self.y[i]} class a_ ( torch.nn.Module ): def __init__( self : List[str] , snake_case__ : str=0 , snake_case__ : Dict=0 , snake_case__ : Any=False ): super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = True def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Any=None ): if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a[0] + self.b[0] class a_ ( torch.nn.Module ): def __init__( self : Any , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=0 , snake_case__ : List[Any]=False ): super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) lowerCAmelCase__ = True def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any]=None ): if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a + self.b def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 16 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase__ = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""} lowerCAmelCase__ = load_dataset("""csv""" , data_files=lowerCamelCase__ ) lowerCAmelCase__ = datasets["""train"""].unique("""label""" ) lowerCAmelCase__ = {v: i for i, v in enumerate(lowerCamelCase__ )} def tokenize_function(lowerCamelCase__ ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="""max_length""" ) if "label" in examples: lowerCAmelCase__ = [label_to_id[l] for l in examples["""label"""]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(lowerCamelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowerCAmelCase__ = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=2 ) lowerCAmelCase__ = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
code_codestyle: 674
"""simple docstring""" from __future__ import annotations from math import gcd def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ): """simple docstring""" if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: return (pow(lowerCamelCase__ , 2 ) + step) % modulus for _ in range(lowerCamelCase__ ): # These track the position within the cycle detection logic. lowerCAmelCase__ = seed lowerCAmelCase__ = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowerCAmelCase__ = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) __lowerCAmelCase : List[str] = parser.parse_args() __lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"{args.num} is probably prime") else: __lowerCAmelCase : List[str] = args.num // divisor print(F"{args.num} = {divisor} * {quotient}")
style_context_codestyle: 674
label: 1
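The comment block inside pollard_rho explains the tortoise-and-hare cycle detection; a small usage sketch of the function as reconstructed above, on a composite and on a prime:

# 8051 = 83 * 97, the textbook Pollard's rho example; one factor should turn up.
divisor = pollard_rho(8051, seed=2, attempts=5)
assert divisor in (83, 97)

# For a prime input every attempt ends with gcd == num, so the search returns None.
assert pollard_rho(101, attempts=3) is None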
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class a_ ( __UpperCamelCase ): UpperCamelCase_ : int = (IPNDMScheduler,) UpperCamelCase_ : Optional[int] = (("num_inference_steps", 50),) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **snake_case__ : Union[str, Any] ): lowerCAmelCase__ = {"""num_train_timesteps""": 1000} config.update(**snake_case__ ) return config def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Optional[int]=0 , **snake_case__ : Dict ): lowerCAmelCase__ = dict(self.forward_default_kwargs ) lowerCAmelCase__ = kwargs.pop("""num_inference_steps""" , snake_case__ ) lowerCAmelCase__ = self.dummy_sample lowerCAmelCase__ = 0.1 * sample lowerCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowerCAmelCase__ = self.get_scheduler_config(**snake_case__ ) lowerCAmelCase__ = scheduler_class(**snake_case__ ) scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals lowerCAmelCase__ = dummy_past_residuals[:] if time_step is None: lowerCAmelCase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case__ ) lowerCAmelCase__ = scheduler_class.from_pretrained(snake_case__ ) new_scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals lowerCAmelCase__ = dummy_past_residuals[:] lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowerCAmelCase__ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowerCAmelCase__ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : int ): pass def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any]=0 , **snake_case__ : Optional[int] ): lowerCAmelCase__ = dict(self.forward_default_kwargs ) lowerCAmelCase__ = kwargs.pop("""num_inference_steps""" , snake_case__ ) lowerCAmelCase__ = self.dummy_sample lowerCAmelCase__ = 0.1 * sample lowerCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: lowerCAmelCase__ = self.get_scheduler_config() lowerCAmelCase__ = scheduler_class(**snake_case__ ) scheduler.set_timesteps(snake_case__ ) # copy over dummy past residuals (must be after setting timesteps) lowerCAmelCase__ = dummy_past_residuals[:] if time_step is None: lowerCAmelCase__ = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(snake_case__ ) lowerCAmelCase__ = scheduler_class.from_pretrained(snake_case__ ) # copy over dummy past residuals new_scheduler.set_timesteps(snake_case__ ) # copy over dummy past residual (must be after setting timesteps) lowerCAmelCase__ = dummy_past_residuals[:] lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowerCAmelCase__ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - 
new_output ) ) < 1E-5, "Scheduler outputs are not identical" lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowerCAmelCase__ = new_scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _SCREAMING_SNAKE_CASE ( self : Any , **snake_case__ : Optional[int] ): lowerCAmelCase__ = self.scheduler_classes[0] lowerCAmelCase__ = self.get_scheduler_config(**snake_case__ ) lowerCAmelCase__ = scheduler_class(**snake_case__ ) lowerCAmelCase__ = 10 lowerCAmelCase__ = self.dummy_model() lowerCAmelCase__ = self.dummy_sample_deter scheduler.set_timesteps(snake_case__ ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ = model(snake_case__ , snake_case__ ) lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ = model(snake_case__ , snake_case__ ) lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample return sample def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = dict(self.forward_default_kwargs ) lowerCAmelCase__ = kwargs.pop("""num_inference_steps""" , snake_case__ ) for scheduler_class in self.scheduler_classes: lowerCAmelCase__ = self.get_scheduler_config() lowerCAmelCase__ = scheduler_class(**snake_case__ ) lowerCAmelCase__ = self.dummy_sample lowerCAmelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(snake_case__ , """set_timesteps""" ): scheduler.set_timesteps(snake_case__ ) elif num_inference_steps is not None and not hasattr(snake_case__ , """set_timesteps""" ): lowerCAmelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) lowerCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] lowerCAmelCase__ = dummy_past_residuals[:] lowerCAmelCase__ = scheduler.timesteps[5] lowerCAmelCase__ = scheduler.timesteps[6] lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample lowerCAmelCase__ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=snake_case__ , time_step=snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Any ): for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=snake_case__ , time_step=snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = self.full_loop() lowerCAmelCase__ = torch.mean(torch.abs(snake_case__ ) ) assert abs(result_mean.item() - 2540529 ) < 10
code_codestyle: 674
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = TapasConfig.from_json_file(lowerCamelCase__ ) # set absolute/relative position embeddings parameter lowerCAmelCase__ = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "WTQ": # run_task_main.py hparams lowerCAmelCase__ = 4 lowerCAmelCase__ = True # hparam_utils.py hparams lowerCAmelCase__ = 0.66_46_94 lowerCAmelCase__ = 0.20_79_51 lowerCAmelCase__ = 0.12_11_94 lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = 0.0_35_25_13 lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams lowerCAmelCase__ = 4 lowerCAmelCase__ = False # hparam_utils.py hparams lowerCAmelCase__ = 36.45_19 lowerCAmelCase__ = 0.90_34_21 lowerCAmelCase__ = 2_22.0_88 lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = 0.76_31_41 lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "TABFACT": lowerCAmelCase__ = TapasForSequenceClassification(config=lowerCamelCase__ ) elif task == "MLM": lowerCAmelCase__ = TapasForMaskedLM(config=lowerCamelCase__ ) elif task == "INTERMEDIATE_PRETRAINING": lowerCAmelCase__ = TapasModel(config=lowerCamelCase__ ) else: raise ValueError(f"""Task {task} not supported.""" ) print(f"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model (weights and configuration) print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowerCamelCase__ ) # Save tokenizer files print(f"""Save tokenizer files to {pytorch_dump_path}""" ) lowerCAmelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(lowerCamelCase__ ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA." ) parser.add_argument( "--reset_position_index_per_cell", default=False, action="store_true", help="Whether to use relative position embeddings or not. Defaults to True.", ) parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--tapas_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained TAPAS model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
) __lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
style_context_codestyle: 674
label: 1
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__="pt" ): """simple docstring""" lowerCAmelCase__ = {"""add_prefix_space""": True} if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and not line.startswith(""" """ ) else {} lowerCAmelCase__ = padding_side return tokenizer( [line] , max_length=lowerCamelCase__ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase__ , return_tensors=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , ): """simple docstring""" lowerCAmelCase__ = input_ids.ne(lowerCamelCase__ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class a_ ( __UpperCamelCase ): def __init__( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int]="train" , snake_case__ : Union[str, Any]=None , snake_case__ : List[Any]=None , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[Any]="" , ): super().__init__() lowerCAmelCase__ = Path(snake_case__ ).joinpath(type_path + """.source""" ) lowerCAmelCase__ = Path(snake_case__ ).joinpath(type_path + """.target""" ) lowerCAmelCase__ = self.get_char_lens(self.src_file ) lowerCAmelCase__ = max_source_length lowerCAmelCase__ = max_target_length assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}""" lowerCAmelCase__ = tokenizer lowerCAmelCase__ = prefix if n_obs is not None: lowerCAmelCase__ = self.src_lens[:n_obs] lowerCAmelCase__ = src_lang lowerCAmelCase__ = tgt_lang def __len__( self : Tuple ): return len(self.src_lens ) def __getitem__( self : Any , snake_case__ : Union[str, Any] ): lowerCAmelCase__ = index + 1 # linecache starts at 1 lowerCAmelCase__ = self.prefix + linecache.getline(str(self.src_file ) , snake_case__ ).rstrip("""\n""" ) lowerCAmelCase__ = linecache.getline(str(self.tgt_file ) , snake_case__ ).rstrip("""\n""" ) assert source_line, F"""empty source line for index {index}""" assert tgt_line, F"""empty tgt line for index {index}""" # Need to add eos token manually for T5 if isinstance(self.tokenizer , snake_case__ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right lowerCAmelCase__ = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer ) lowerCAmelCase__ = self.tokenizer.generator if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer lowerCAmelCase__ = encode_line(snake_case__ , snake_case__ , self.max_source_length , """right""" ) lowerCAmelCase__ = encode_line(snake_case__ , snake_case__ , self.max_target_length , """right""" ) lowerCAmelCase__ = source_inputs["""input_ids"""].squeeze() lowerCAmelCase__ = target_inputs["""input_ids"""].squeeze() lowerCAmelCase__ = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, 
"attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def _SCREAMING_SNAKE_CASE ( snake_case__ : Union[str, Any] ): return [len(snake_case__ ) for x in Path(snake_case__ ).open().readlines()] def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Optional[Any] ): lowerCAmelCase__ = torch.stack([x["""input_ids"""] for x in batch] ) lowerCAmelCase__ = torch.stack([x["""attention_mask"""] for x in batch] ) lowerCAmelCase__ = torch.stack([x["""decoder_input_ids"""] for x in batch] ) lowerCAmelCase__ = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer.pad_token_id ) lowerCAmelCase__ = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer.pad_token_id ) lowerCAmelCase__ = trim_batch(snake_case__ , snake_case__ ) lowerCAmelCase__ , lowerCAmelCase__ = trim_batch(snake_case__ , snake_case__ , attention_mask=snake_case__ ) lowerCAmelCase__ = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __lowerCAmelCase : Optional[int] = getLogger(__name__) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return list(itertools.chain.from_iterable(lowerCamelCase__ ) ) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = get_git_info() save_json(lowerCamelCase__ , os.path.join(lowerCamelCase__ , """git_log.json""" ) ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=4 , **lowerCamelCase__ ): """simple docstring""" with open(lowerCamelCase__ , """w""" ) as f: json.dump(lowerCamelCase__ , lowerCamelCase__ , indent=lowerCamelCase__ , **lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" with open(lowerCamelCase__ ) as f: return json.load(lowerCamelCase__ ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = git.Repo(search_parent_directories=lowerCamelCase__ ) lowerCAmelCase__ = { """repo_id""": str(lowerCamelCase__ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return list(map(lowerCamelCase__ , lowerCamelCase__ ) ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" with open(lowerCamelCase__ , """wb""" ) as f: return pickle.dump(lowerCamelCase__ , lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" def remove_articles(lowerCamelCase__ ): return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase__ ) def white_space_fix(lowerCamelCase__ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase__ ): lowerCAmelCase__ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase__ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = normalize_answer(lowerCamelCase__ ).split() lowerCAmelCase__ = normalize_answer(lowerCamelCase__ ).split() lowerCAmelCase__ = Counter(lowerCamelCase__ ) & Counter(lowerCamelCase__ ) lowerCAmelCase__ = sum(common.values() ) if num_same == 0: return 0 lowerCAmelCase__ = 1.0 * num_same / len(lowerCamelCase__ ) lowerCAmelCase__ = 1.0 * num_same / len(lowerCamelCase__ ) lowerCAmelCase__ = (2 * precision * recall) / 
(precision + recall) return fa def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" assert len(lowerCamelCase__ ) == len(lowerCamelCase__ ) lowerCAmelCase__ = 0 for hypo, pred in zip(lowerCamelCase__ , lowerCamelCase__ ): em += exact_match_score(lowerCamelCase__ , lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: em /= len(lowerCamelCase__ ) return {"em": em} def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return model_prefix.startswith("""rag""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead lowerCAmelCase__ = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and not hasattr(lowerCamelCase__ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase__ ) ) delattr(lowerCamelCase__ , lowerCamelCase__ ) continue lowerCAmelCase__ = p if hasattr(lowerCamelCase__ , lowerCamelCase__ ) else equivalent_param[p] setattr(lowerCamelCase__ , lowerCamelCase__ , getattr(lowerCamelCase__ , lowerCamelCase__ ) ) delattr(lowerCamelCase__ , lowerCamelCase__ ) return hparams, config
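The `trim_batch` helper near the top of this snippet drops padding columns that every sequence in the batch shares. A minimal, self-contained sketch of that trick (`pad_token_id=0` is an assumption for the demo, not taken from the snippet):

import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 7, 0, 0], [9, 0, 0, 0]])
keep = input_ids.ne(pad_token_id).any(dim=0)  # True for columns with any real token
trimmed = input_ids[:, keep]
print(trimmed)  # tensor([[5, 7], [9, 0]]) -- trailing all-pad columns removed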
674
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ = 50 ): """simple docstring""" lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"{solution() = }")
674
1
"""simple docstring""" import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient __lowerCAmelCase : List[str] = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"]) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = test_results.split(""" """ ) lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. lowerCAmelCase__ = expressions[-2] if """=""" in expressions[-1] else expressions[-1] for i, expression in enumerate(lowerCamelCase__ ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = {} lowerCAmelCase__ = None lowerCAmelCase__ = False for line in failures_short_lines.split("""\n""" ): if re.search(r"""_ \[doctest\]""" , lowerCamelCase__ ): lowerCAmelCase__ = True lowerCAmelCase__ = line.split(""" """ )[2] elif in_error and not line.split(""" """ )[0].isdigit(): lowerCAmelCase__ = line lowerCAmelCase__ = False return failures class a_ : def __init__( self : Union[str, Any] , snake_case__ : str , snake_case__ : Dict ): lowerCAmelCase__ = title lowerCAmelCase__ = doc_test_results["""time_spent"""].split(""",""" )[0] lowerCAmelCase__ = doc_test_results["""success"""] lowerCAmelCase__ = doc_test_results["""failures"""] lowerCAmelCase__ = self.n_success + self.n_failures # Failures and success of the modeling tests lowerCAmelCase__ = doc_test_results @property def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = [self._time_spent] lowerCAmelCase__ = 0 for time in time_spent: lowerCAmelCase__ = time.split(""":""" ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(snake_case__ ) == 1: lowerCAmelCase__ = [0, 0, time_parts[0]] lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3600 + minutes * 60 + seconds lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F"""{int(snake_case__ )}h{int(snake_case__ )}m{int(snake_case__ )}s""" @property def _SCREAMING_SNAKE_CASE ( self : int ): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def _SCREAMING_SNAKE_CASE ( self : Any ): return { "type": "section", "text": { "type": "plain_text", "text": F"""🌞 There were no failures: all {self.n_tests} tests passed. 
The suite ran in {self.time}.""", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } @property def _SCREAMING_SNAKE_CASE ( self : Dict ): return { "type": "section", "text": { "type": "plain_text", "text": ( F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in""" F""" {self.time}.""" ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = 40 lowerCAmelCase__ = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(snake_case__ , snake_case__ )} lowerCAmelCase__ = """""" for category, failures in category_failures.items(): if len(snake_case__ ) == 0: continue if report != "": report += "\n\n" report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(snake_case__ ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F"""The following examples had failures:\n\n\n{report}\n""", }, } @property def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(snake_case__ ) @staticmethod def _SCREAMING_SNAKE_CASE ( ): lowerCAmelCase__ = [ { """type""": """section""", """text""": { """type""": """plain_text""", """text""": """There was an issue running the tests.""", }, """accessory""": { """type""": """button""", """text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True}, """url""": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } ] print("""Sending the following payload""" ) print(json.dumps({"""blocks""": json.loads(snake_case__ )} ) ) client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=snake_case__ , ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): print("""Sending the following payload""" ) print(json.dumps({"""blocks""": json.loads(self.payload )} ) ) lowerCAmelCase__ = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed.""" lowerCAmelCase__ = client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=snake_case__ , ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] ): lowerCAmelCase__ = """""" for key, value in failures.items(): lowerCAmelCase__ = value[:200] + """ [Truncated]""" if len(snake_case__ ) > 250 else value failures_text += F"""*{key}*\n_{value}_\n\n""" lowerCAmelCase__ = job_name lowerCAmelCase__ = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}} if job_link is not None: lowerCAmelCase__ = { """type""": """button""", """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True}, """url""": job_link, } return [ {"type": "header", 
"text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): if self.thread_ts is None: raise ValueError("""Can only post reply if a post has been made.""" ) lowerCAmelCase__ = self.doc_test_results.pop("""job_link""" ) self.doc_test_results.pop("""failures""" ) self.doc_test_results.pop("""success""" ) self.doc_test_results.pop("""time_spent""" ) lowerCAmelCase__ = sorted(self.doc_test_results.items() , key=lambda snake_case__ : t[0] ) for job, job_result in sorted_dict: if len(job_result["""failures"""] ): lowerCAmelCase__ = F"""*Num failures* :{len(job_result['failed'] )} \n""" lowerCAmelCase__ = job_result["""failures"""] lowerCAmelCase__ = self.get_reply_blocks(snake_case__ , snake_case__ , snake_case__ , text=snake_case__ ) print("""Sending the following reply""" ) print(json.dumps({"""blocks""": blocks} ) ) client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=F"""Results for {job}""" , blocks=snake_case__ , thread_ts=self.thread_ts["""ts"""] , ) time.sleep(1 ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = os.environ["""GITHUB_RUN_ID"""] lowerCAmelCase__ = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100""" lowerCAmelCase__ = requests.get(lowerCamelCase__ ).json() lowerCAmelCase__ = {} try: jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) lowerCAmelCase__ = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(lowerCamelCase__ ): lowerCAmelCase__ = requests.get(url + f"""&page={i + 2}""" ).json() jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return jobs except Exception as e: print("""Unknown error, could not fetch links.""" , lowerCamelCase__ ) return {} def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = {} if os.path.exists(lowerCamelCase__ ): lowerCAmelCase__ = os.listdir(lowerCamelCase__ ) for file in files: try: with open(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , encoding="""utf-8""" ) as f: lowerCAmelCase__ = f.read() except UnicodeDecodeError as e: raise ValueError(f"""Could not open {os.path.join(lowerCamelCase__ , lowerCamelCase__ )}.""" ) from e return _artifact def _UpperCAmelCase ( ): """simple docstring""" class a_ : def __init__( self : Optional[int] , snake_case__ : str ): lowerCAmelCase__ = name lowerCAmelCase__ = [] def __str__( self : List[str] ): return self.name def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : str ): self.paths.append({"""name""": self.name, """path""": path} ) lowerCAmelCase__ = {} lowerCAmelCase__ = filter(os.path.isdir , os.listdir() ) for directory in directories: lowerCAmelCase__ = directory if artifact_name not in _available_artifacts: lowerCAmelCase__ = Artifact(lowerCamelCase__ ) _available_artifacts[artifact_name].add_path(lowerCamelCase__ ) return _available_artifacts if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = get_job_links() __lowerCAmelCase : str = retrieve_available_artifacts() __lowerCAmelCase : Optional[int] = collections.OrderedDict( [ ("*.py", "API Examples"), ("*.md", "MD Examples"), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' __lowerCAmelCase : Optional[int] = { v: { "failed": [], "failures": {}, } for v 
in docs.values() } # Link to the GitHub Action job __lowerCAmelCase : Optional[int] = github_actions_job_links.get("run_doctests") __lowerCAmelCase : Any = available_artifacts["doc_tests_gpu_test_reports"].paths[0] __lowerCAmelCase : Dict = retrieve_artifact(artifact_path["name"]) if "stats" in artifact: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = handle_test_results(artifact["stats"]) __lowerCAmelCase : List[Any] = failed __lowerCAmelCase : Dict = success __lowerCAmelCase : List[str] = time_spent[1:-1] + ", " __lowerCAmelCase : Union[str, Any] = extract_first_line_failure(artifact["failures_short"]) for line in artifact["summary_short"].split("\n"): if re.search("FAILED", line): __lowerCAmelCase : Tuple = line.replace("FAILED ", "") __lowerCAmelCase : Any = line.split()[0].replace("\n", "") if "::" in line: __lowerCAmelCase , __lowerCAmelCase : List[Any] = line.split("::") else: __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): __lowerCAmelCase : Union[str, Any] = docs[file_regex] doc_test_results[category]["failed"].append(test) __lowerCAmelCase : List[str] = all_failures[test] if test in all_failures else "N/A" __lowerCAmelCase : Optional[int] = failure break __lowerCAmelCase : Optional[Any] = Message("🤗 Results of the doc tests.", doc_test_results) message.post() message.post_reply()
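The `handle_test_results` parser at the top of this script scans a pytest summary string for the counts that precede "failed" and "passed". A tiny standalone version of the same scan, with a made-up summary line:

def parse_pytest_summary(line: str) -> tuple[int, int]:
    failed, passed = 0, 0
    tokens = line.split()
    for i, token in enumerate(tokens):
        if "failed" in token:
            failed += int(tokens[i - 1])
        if "passed" in token:
            passed += int(tokens[i - 1])
    return failed, passed

print(parse_pytest_summary("== 3 failed, 42 passed in 12.34s =="))  # (3, 42)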
674
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() __lowerCAmelCase : Any = logging.get_logger(__name__) __lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 1024, """hidden_size""": 768, """max_length""": 512, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 1024, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1e-5, """token_type_vocab_size""": 2, } lowerCAmelCase__ = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowerCAmelCase__ = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" ) lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ ) lowerCAmelCase__ = nlp.model.BERTModel( lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , ) original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ ) lowerCAmelCase__ = original_bort._collect_params_with_prefix() # Build our config 🤗 lowerCAmelCase__ = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": 
predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.02, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(lowerCamelCase__ ), } lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ ) lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase__ ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ): lowerCAmelCase__ = hf_param.shape lowerCAmelCase__ = to_torch(params[gluon_param] ) lowerCAmelCase__ = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers""" return gluon_param lowerCAmelCase__ = check_and_map_params( 
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowerCAmelCase__ = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i] # self attention lowerCAmelCase__ = layer.attention.self lowerCAmelCase__ = check_and_map_params( self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output lowerCAmelCase__ = layer.attention.output lowerCAmelCase__ = check_and_map_params( self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" ) lowerCAmelCase__ = check_and_map_params( self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate lowerCAmelCase__ = layer.intermediate lowerCAmelCase__ = check_and_map_params( intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) lowerCAmelCase__ = check_and_map_params( intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output lowerCAmelCase__ = layer.output lowerCAmelCase__ = check_and_map_params( bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) lowerCAmelCase__ = check_and_map_params( bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" ) lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""] # Get gluon output lowerCAmelCase__ = mx.nd.array([input_ids] ) lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase__ ) lowerCAmelCase__ = 
BertModel.from_pretrained(lowerCamelCase__ ) hf_bort_model.eval() lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" ) lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0] lowerCAmelCase__ = output_gluon[0].asnumpy() lowerCAmelCase__ = output_hf[0].detach().numpy() lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item() lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) if success: print("""✔️ Both model do output the same tensors""" ) else: print("""❌ Both model do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : str = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
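Each `check_and_map_params` call above follows one pattern: look up the source array by name, assert its shape against the destination parameter, and wrap it as a torch parameter. A minimal sketch of that pattern with a made-up one-entry parameter dict:

import numpy as np
import torch
from torch import nn

source_params = {"encoder.layer_norm.gamma": np.ones((4,), dtype=np.float32)}
dest = nn.LayerNorm(4)

def map_param(hf_param: torch.Tensor, key: str) -> nn.Parameter:
    array = source_params[key]
    assert tuple(hf_param.shape) == array.shape, f"shape mismatch for {key}"
    return nn.Parameter(torch.from_numpy(array))

dest.weight = map_param(dest.weight, "encoder.layer_norm.gamma")
print(dest.weight.shape)  # torch.Size([4])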
674
1
"""simple docstring""" from collections.abc import Sequence from queue import Queue class a_ : def __init__( self : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : str=None , snake_case__ : str=None ): lowerCAmelCase__ = start lowerCAmelCase__ = end lowerCAmelCase__ = val lowerCAmelCase__ = (start + end) // 2 lowerCAmelCase__ = left lowerCAmelCase__ = right def __repr__( self : str ): return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})""" class a_ : def __init__( self : Optional[int] , snake_case__ : Sequence , snake_case__ : List[Any] ): lowerCAmelCase__ = collection lowerCAmelCase__ = function if self.collection: lowerCAmelCase__ = self._build_tree(0 , len(snake_case__ ) - 1 ) def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any] ): self._update_tree(self.root , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : str , snake_case__ : List[str] ): return self._query_range(self.root , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ): if start == end: return SegmentTreeNode(snake_case__ , snake_case__ , self.collection[start] ) lowerCAmelCase__ = (start + end) // 2 lowerCAmelCase__ = self._build_tree(snake_case__ , snake_case__ ) lowerCAmelCase__ = self._build_tree(mid + 1 , snake_case__ ) return SegmentTreeNode(snake_case__ , snake_case__ , self.fn(left.val , right.val ) , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): if node.start == i and node.end == i: lowerCAmelCase__ = val return if i <= node.mid: self._update_tree(node.left , snake_case__ , snake_case__ ) else: self._update_tree(node.right , snake_case__ , snake_case__ ) lowerCAmelCase__ = self.fn(node.left.val , node.right.val ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Any ): if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , snake_case__ , snake_case__ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , snake_case__ , node.mid ) , self._query_range(node.right , node.mid + 1 , snake_case__ ) , ) else: # range in right child tree return self._query_range(node.right , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): if self.root is not None: lowerCAmelCase__ = Queue() queue.put(self.root ) while not queue.empty(): lowerCAmelCase__ = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print("*" * 50) __lowerCAmelCase : str = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
674
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a_ : def __init__( self : Optional[int] ): lowerCAmelCase__ = """""" lowerCAmelCase__ = """""" lowerCAmelCase__ = [] lowerCAmelCase__ = 0 lowerCAmelCase__ = 256 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ): lowerCAmelCase__ = cva.imread(snake_case__ , 0 ) lowerCAmelCase__ = copy.deepcopy(self.img ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" ) lowerCAmelCase__ = np.sum(snake_case__ ) for i in range(len(snake_case__ ) ): lowerCAmelCase__ = x[i] / self.k self.sk += prk lowerCAmelCase__ = (self.L - 1) * self.sk if self.rem != 0: lowerCAmelCase__ = int(last % last ) lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(snake_case__ ) lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size ) lowerCAmelCase__ = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCAmelCase__ = self.img[j][i] if num != self.last_list[num]: lowerCAmelCase__ = self.last_list[num] cva.imwrite("""output_data/output.jpg""" , self.img ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): plt.hist(self.img.ravel() , 256 , [0, 256] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): cva.imshow("""Output-Image""" , self.img ) cva.imshow("""Input-Image""" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": __lowerCAmelCase : Dict = os.path.join(os.path.basename(__file__), "image_data/input.jpg") __lowerCAmelCase : Optional[int] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
674
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class a_ ( __UpperCamelCase ): UpperCamelCase_ : torch.FloatTensor class a_ ( nn.Module ): def __init__( self : Dict , snake_case__ : str=3 , snake_case__ : str=3 , snake_case__ : List[str]=("DownEncoderBlock2D",) , snake_case__ : Tuple=(64,) , snake_case__ : str=2 , snake_case__ : Tuple=32 , snake_case__ : List[str]="silu" , snake_case__ : int=True , ): super().__init__() lowerCAmelCase__ = layers_per_block lowerCAmelCase__ = torch.nn.Convad( snake_case__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) lowerCAmelCase__ = None lowerCAmelCase__ = nn.ModuleList([] ) # down lowerCAmelCase__ = block_out_channels[0] for i, down_block_type in enumerate(snake_case__ ): lowerCAmelCase__ = output_channel lowerCAmelCase__ = block_out_channels[i] lowerCAmelCase__ = i == len(snake_case__ ) - 1 lowerCAmelCase__ = get_down_block( snake_case__ , num_layers=self.layers_per_block , in_channels=snake_case__ , out_channels=snake_case__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , ) self.down_blocks.append(snake_case__ ) # mid lowerCAmelCase__ = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , ) # out lowerCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=snake_case__ , eps=1E-6 ) lowerCAmelCase__ = nn.SiLU() lowerCAmelCase__ = 2 * out_channels if double_z else out_channels lowerCAmelCase__ = nn.Convad(block_out_channels[-1] , snake_case__ , 3 , padding=1 ) lowerCAmelCase__ = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Optional[int] ): lowerCAmelCase__ = x lowerCAmelCase__ = self.conv_in(snake_case__ ) if self.training and self.gradient_checkpointing: def create_custom_forward(snake_case__ : List[Any] ): def custom_forward(*snake_case__ : str ): return module(*snake_case__ ) return custom_forward # down if is_torch_version(""">=""" , """1.11.0""" ): for down_block in self.down_blocks: lowerCAmelCase__ = torch.utils.checkpoint.checkpoint( create_custom_forward(snake_case__ ) , snake_case__ , use_reentrant=snake_case__ ) # middle lowerCAmelCase__ = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , use_reentrant=snake_case__ ) else: for down_block in self.down_blocks: lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ ) # middle lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , snake_case__ ) else: # down for down_block in self.down_blocks: lowerCAmelCase__ = down_block(snake_case__ ) # middle lowerCAmelCase__ = self.mid_block(snake_case__ ) # post-process lowerCAmelCase__ = self.conv_norm_out(snake_case__ ) lowerCAmelCase__ = self.conv_act(snake_case__ ) lowerCAmelCase__ = self.conv_out(snake_case__ ) return sample class a_ ( nn.Module ): def __init__( self : Union[str, Any] , snake_case__ : Any=3 , snake_case__ : 
int=3 , snake_case__ : List[str]=("UpDecoderBlock2D",) , snake_case__ : Tuple=(64,) , snake_case__ : List[Any]=2 , snake_case__ : str=32 , snake_case__ : Optional[Any]="silu" , snake_case__ : str="group" , ): super().__init__() lowerCAmelCase__ = layers_per_block lowerCAmelCase__ = nn.Convad( snake_case__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) lowerCAmelCase__ = None lowerCAmelCase__ = nn.ModuleList([] ) lowerCAmelCase__ = in_channels if norm_type == """spatial""" else None # mid lowerCAmelCase__ = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=snake_case__ , output_scale_factor=1 , resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=snake_case__ , temb_channels=snake_case__ , ) # up lowerCAmelCase__ = list(reversed(snake_case__ ) ) lowerCAmelCase__ = reversed_block_out_channels[0] for i, up_block_type in enumerate(snake_case__ ): lowerCAmelCase__ = output_channel lowerCAmelCase__ = reversed_block_out_channels[i] lowerCAmelCase__ = i == len(snake_case__ ) - 1 lowerCAmelCase__ = get_up_block( snake_case__ , num_layers=self.layers_per_block + 1 , in_channels=snake_case__ , out_channels=snake_case__ , prev_output_channel=snake_case__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=snake_case__ , resnet_groups=snake_case__ , attention_head_dim=snake_case__ , temb_channels=snake_case__ , resnet_time_scale_shift=snake_case__ , ) self.up_blocks.append(snake_case__ ) lowerCAmelCase__ = output_channel # out if norm_type == "spatial": lowerCAmelCase__ = SpatialNorm(block_out_channels[0] , snake_case__ ) else: lowerCAmelCase__ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=snake_case__ , eps=1E-6 ) lowerCAmelCase__ = nn.SiLU() lowerCAmelCase__ = nn.Convad(block_out_channels[0] , snake_case__ , 3 , padding=1 ) lowerCAmelCase__ = False def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any]=None ): lowerCAmelCase__ = z lowerCAmelCase__ = self.conv_in(snake_case__ ) lowerCAmelCase__ = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(snake_case__ : str ): def custom_forward(*snake_case__ : Union[str, Any] ): return module(*snake_case__ ) return custom_forward if is_torch_version(""">=""" , """1.11.0""" ): # middle lowerCAmelCase__ = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ ) lowerCAmelCase__ = sample.to(snake_case__ ) # up for up_block in self.up_blocks: lowerCAmelCase__ = torch.utils.checkpoint.checkpoint( create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ , use_reentrant=snake_case__ ) else: # middle lowerCAmelCase__ = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , snake_case__ , snake_case__ ) lowerCAmelCase__ = sample.to(snake_case__ ) # up for up_block in self.up_blocks: lowerCAmelCase__ = torch.utils.checkpoint.checkpoint(create_custom_forward(snake_case__ ) , snake_case__ , snake_case__ ) else: # middle lowerCAmelCase__ = self.mid_block(snake_case__ , snake_case__ ) lowerCAmelCase__ = sample.to(snake_case__ ) # up for up_block in self.up_blocks: lowerCAmelCase__ = up_block(snake_case__ , snake_case__ ) # post-process if latent_embeds is None: lowerCAmelCase__ = self.conv_norm_out(snake_case__ ) else: lowerCAmelCase__ = self.conv_norm_out(snake_case__ , 
snake_case__ ) lowerCAmelCase__ = self.conv_act(snake_case__ ) lowerCAmelCase__ = self.conv_out(snake_case__ ) return sample class a_ ( nn.Module ): def __init__( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Any=None , snake_case__ : str="random" , snake_case__ : Optional[int]=False , snake_case__ : List[Any]=True ): super().__init__() lowerCAmelCase__ = n_e lowerCAmelCase__ = vq_embed_dim lowerCAmelCase__ = beta lowerCAmelCase__ = legacy lowerCAmelCase__ = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) lowerCAmelCase__ = remap if self.remap is not None: self.register_buffer("""used""" , torch.tensor(np.load(self.remap ) ) ) lowerCAmelCase__ = self.used.shape[0] lowerCAmelCase__ = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": lowerCAmelCase__ = self.re_embed lowerCAmelCase__ = self.re_embed + 1 print( F"""Remapping {self.n_e} indices to {self.re_embed} indices. """ F"""Using {self.unknown_index} for unknown indices.""" ) else: lowerCAmelCase__ = n_e lowerCAmelCase__ = sane_index_shape def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Dict ): lowerCAmelCase__ = inds.shape assert len(snake_case__ ) > 1 lowerCAmelCase__ = inds.reshape(ishape[0] , -1 ) lowerCAmelCase__ = self.used.to(snake_case__ ) lowerCAmelCase__ = (inds[:, :, None] == used[None, None, ...]).long() lowerCAmelCase__ = match.argmax(-1 ) lowerCAmelCase__ = match.sum(2 ) < 1 if self.unknown_index == "random": lowerCAmelCase__ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: lowerCAmelCase__ = self.unknown_index return new.reshape(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : str ): lowerCAmelCase__ = inds.shape assert len(snake_case__ ) > 1 lowerCAmelCase__ = inds.reshape(ishape[0] , -1 ) lowerCAmelCase__ = self.used.to(snake_case__ ) if self.re_embed > self.used.shape[0]: # extra token lowerCAmelCase__ = 0 # simply set to zero lowerCAmelCase__ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , snake_case__ ) return back.reshape(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : List[Any] ): # reshape z -> (batch, height, width, channel) and flatten lowerCAmelCase__ = z.permute(0 , 2 , 3 , 1 ).contiguous() lowerCAmelCase__ = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z lowerCAmelCase__ = torch.argmin(torch.cdist(snake_case__ , self.embedding.weight ) , dim=1 ) lowerCAmelCase__ = self.embedding(snake_case__ ).view(z.shape ) lowerCAmelCase__ = None lowerCAmelCase__ = None # compute loss for embedding if not self.legacy: lowerCAmelCase__ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: lowerCAmelCase__ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients lowerCAmelCase__ = z + (z_q - z).detach() # reshape back to match original input shape lowerCAmelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: lowerCAmelCase__ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis lowerCAmelCase__ = self.remap_to_used(snake_case__ ) lowerCAmelCase__ = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: lowerCAmelCase__ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, 
min_encoding_indices) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): # shape specifying (batch, height, width, channel) if self.remap is not None: lowerCAmelCase__ = indices.reshape(shape[0] , -1 ) # add batch axis lowerCAmelCase__ = self.unmap_to_all(snake_case__ ) lowerCAmelCase__ = indices.reshape(-1 ) # flatten again # get quantized latent vectors lowerCAmelCase__ = self.embedding(snake_case__ ) if shape is not None: lowerCAmelCase__ = z_q.view(snake_case__ ) # reshape back to match original input shape lowerCAmelCase__ = z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class a_ ( __UpperCamelCase ): def __init__( self : Dict , snake_case__ : Any , snake_case__ : List[str]=False ): lowerCAmelCase__ = parameters lowerCAmelCase__ , lowerCAmelCase__ = torch.chunk(snake_case__ , 2 , dim=1 ) lowerCAmelCase__ = torch.clamp(self.logvar , -30.0 , 20.0 ) lowerCAmelCase__ = deterministic lowerCAmelCase__ = torch.exp(0.5 * self.logvar ) lowerCAmelCase__ = torch.exp(self.logvar ) if self.deterministic: lowerCAmelCase__ = lowerCAmelCase__ = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Optional[torch.Generator] = None ): # make sure sample is on the same device as the parameters and has same dtype lowerCAmelCase__ = randn_tensor( self.mean.shape , generator=snake_case__ , device=self.parameters.device , dtype=self.parameters.dtype ) lowerCAmelCase__ = self.mean + self.std * sample return x def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Dict=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : int , snake_case__ : int=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) lowerCAmelCase__ = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): return self.mean
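The heart of the `VectorQuantizer` above, in isolation: nearest-codebook lookup, the two-term commitment loss, and the straight-through gradient trick `z + (z_q - z).detach()`. The codebook size, latent shape, and `beta` value here are illustrative only:

import torch

codebook = torch.randn(16, 8)              # 16 codes of dimension 8
z = torch.randn(4, 8, requires_grad=True)  # 4 flattened latents

indices = torch.cdist(z, codebook).argmin(dim=1)
z_q = codebook[indices]

beta = 0.25
loss = torch.mean((z_q.detach() - z) ** 2) + beta * torch.mean((z_q - z.detach()) ** 2)

z_q = z + (z_q - z).detach()               # gradients flow straight through to z
loss.backward()
print(indices.shape, z.grad.shape)         # torch.Size([4]) torch.Size([4, 8])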
674
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class a_ ( __UpperCamelCase ): UpperCamelCase_ : List[str] = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined" UpperCamelCase_ : Any = "image_segmenter" UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation UpperCamelCase_ : List[str] = ["image", "text"] UpperCamelCase_ : int = ["image"] def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ): requires_backends(self , ["""vision"""] ) super().__init__(*snake_case__ , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ): return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" ) def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ): with torch.no_grad(): lowerCAmelCase__ = self.model(**snake_case__ ).logits return logits def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ): lowerCAmelCase__ = outputs.cpu().detach().numpy() lowerCAmelCase__ = 0 lowerCAmelCase__ = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
674
1
"""simple docstring""" import warnings from ..trainer import Trainer from ..utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) class a_ ( __UpperCamelCase ): def __init__( self : Optional[Any] , snake_case__ : List[Any]=None , **snake_case__ : Dict ): warnings.warn( """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """ """instead.""" , snake_case__ , ) super().__init__(args=snake_case__ , **snake_case__ )
674
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = LayoutLMTokenizer UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast UpperCamelCase_ : Dict = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() lowerCAmelCase__ = [ """[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ): lowerCAmelCase__ = """UNwant\u00E9d,running""" lowerCAmelCase__ = """unwanted, running""" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): pass
674
1
"""simple docstring""" import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class a_ ( __UpperCamelCase ): UpperCamelCase_ : BigBirdConfig UpperCamelCase_ : jnp.dtype = jnp.floataa UpperCamelCase_ : bool = True def _SCREAMING_SNAKE_CASE ( self : Any ): super().setup() lowerCAmelCase__ = nn.Dense(5 , dtype=self.dtype ) def __call__( self : str , *snake_case__ : int , **snake_case__ : Optional[int] ): lowerCAmelCase__ = super().__call__(*snake_case__ , **snake_case__ ) lowerCAmelCase__ = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class a_ ( __UpperCamelCase ): UpperCamelCase_ : Any = FlaxBigBirdForNaturalQuestionsModule def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" def cross_entropy(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ): lowerCAmelCase__ = logits.shape[-1] lowerCAmelCase__ = (labels[..., None] == jnp.arange(lowerCamelCase__ )[None]).astype("""f4""" ) lowerCAmelCase__ = jax.nn.log_softmax(lowerCamelCase__ , axis=-1 ) lowerCAmelCase__ = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: lowerCAmelCase__ = reduction(lowerCamelCase__ ) return loss lowerCAmelCase__ = partial(lowerCamelCase__ , reduction=jnp.mean ) lowerCAmelCase__ = cross_entropy(lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = cross_entropy(lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = cross_entropy(lowerCamelCase__ , lowerCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class a_ : UpperCamelCase_ : str = "google/bigbird-roberta-base" UpperCamelCase_ : int = 3000 UpperCamelCase_ : int = 1_0500 UpperCamelCase_ : int = 128 UpperCamelCase_ : int = 3 UpperCamelCase_ : int = 1 UpperCamelCase_ : int = 5 # tx_args UpperCamelCase_ : float = 3e-5 UpperCamelCase_ : float = 0.0 UpperCamelCase_ : int = 2_0000 UpperCamelCase_ : float = 0.00_95 UpperCamelCase_ : str = "bigbird-roberta-natural-questions" UpperCamelCase_ : str = "training-expt" UpperCamelCase_ : str = "data/nq-training.jsonl" UpperCamelCase_ : str = "data/nq-validation.jsonl" def _SCREAMING_SNAKE_CASE ( self : int ): os.makedirs(self.base_dir , exist_ok=snake_case__ ) lowerCAmelCase__ = os.path.join(self.base_dir , self.save_dir ) lowerCAmelCase__ = self.batch_size_per_device * jax.device_count() @dataclass class a_ : UpperCamelCase_ : int UpperCamelCase_ : int = 4096 # no dynamic padding on TPUs def __call__( self : int , snake_case__ : Any ): lowerCAmelCase__ = self.collate_fn(snake_case__ ) lowerCAmelCase__ = jax.tree_util.tree_map(snake_case__ , snake_case__ ) return batch def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Dict ): lowerCAmelCase__ , lowerCAmelCase__ = self.fetch_inputs(features["""input_ids"""] ) lowerCAmelCase__ = { """input_ids""": jnp.array(snake_case__ , dtype=jnp.intaa ), """attention_mask""": jnp.array(snake_case__ , dtype=jnp.intaa ), """start_labels""": jnp.array(features["""start_token"""] , 
dtype=jnp.intaa ), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ), } return batch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : list ): lowerCAmelCase__ = [self._fetch_inputs(snake_case__ ) for ids in input_ids] return zip(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : list ): lowerCAmelCase__ = [1 for _ in range(len(snake_case__ ) )] while len(snake_case__ ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ): """simple docstring""" if seed is not None: lowerCAmelCase__ = dataset.shuffle(seed=lowerCamelCase__ ) for i in range(len(lowerCamelCase__ ) // batch_size ): lowerCAmelCase__ = dataset[i * batch_size : (i + 1) * batch_size] yield dict(lowerCamelCase__ ) @partial(jax.pmap , axis_name="""batch""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" def loss_fn(lowerCamelCase__ ): lowerCAmelCase__ = model_inputs.pop("""start_labels""" ) lowerCAmelCase__ = model_inputs.pop("""end_labels""" ) lowerCAmelCase__ = model_inputs.pop("""pooled_labels""" ) lowerCAmelCase__ = state.apply_fn(**lowerCamelCase__ , params=lowerCamelCase__ , dropout_rng=lowerCamelCase__ , train=lowerCamelCase__ ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = outputs return state.loss_fn( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) lowerCAmelCase__ , lowerCAmelCase__ = jax.random.split(lowerCamelCase__ ) lowerCAmelCase__ = jax.value_and_grad(lowerCamelCase__ ) lowerCAmelCase__ , lowerCAmelCase__ = grad_fn(state.params ) lowerCAmelCase__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) lowerCAmelCase__ = jax.lax.pmean(lowerCamelCase__ , """batch""" ) lowerCAmelCase__ = state.apply_gradients(grads=lowerCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def _UpperCAmelCase ( lowerCamelCase__ , **lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = model_inputs.pop("""start_labels""" ) lowerCAmelCase__ = model_inputs.pop("""end_labels""" ) lowerCAmelCase__ = model_inputs.pop("""pooled_labels""" ) lowerCAmelCase__ = state.apply_fn(**lowerCamelCase__ , params=state.params , train=lowerCamelCase__ ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = outputs lowerCAmelCase__ = state.loss_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class a_ ( train_state.TrainState ): UpperCamelCase_ : Callable = struct.field(pytree_node=__UpperCamelCase ) @dataclass class a_ : UpperCamelCase_ : Args UpperCamelCase_ : Callable UpperCamelCase_ : Callable UpperCamelCase_ : Callable UpperCamelCase_ : Callable UpperCamelCase_ : wandb UpperCamelCase_ : Callable = None def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : int=None ): lowerCAmelCase__ = model.params lowerCAmelCase__ = TrainState.create( apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , loss_fn=snake_case__ , ) if ckpt_dir is not None: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , 
lowerCAmelCase__ = restore_checkpoint(snake_case__ , snake_case__ ) lowerCAmelCase__ = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } lowerCAmelCase__ , lowerCAmelCase__ = build_tx(**snake_case__ ) lowerCAmelCase__ = train_state.TrainState( step=snake_case__ , apply_fn=model.__call__ , params=snake_case__ , tx=snake_case__ , opt_state=snake_case__ , ) lowerCAmelCase__ = args lowerCAmelCase__ = data_collator lowerCAmelCase__ = lr lowerCAmelCase__ = params lowerCAmelCase__ = jax_utils.replicate(snake_case__ ) return state def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Any ): lowerCAmelCase__ = self.args lowerCAmelCase__ = len(snake_case__ ) // args.batch_size lowerCAmelCase__ = jax.random.PRNGKey(0 ) lowerCAmelCase__ = jax.random.split(snake_case__ , jax.device_count() ) for epoch in range(args.max_epochs ): lowerCAmelCase__ = jnp.array(0 , dtype=jnp.floataa ) lowerCAmelCase__ = get_batched_dataset(snake_case__ , args.batch_size , seed=snake_case__ ) lowerCAmelCase__ = 0 for batch in tqdm(snake_case__ , total=snake_case__ , desc=F"""Running EPOCH-{epoch}""" ): lowerCAmelCase__ = self.data_collator(snake_case__ ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.train_step_fn(snake_case__ , snake_case__ , **snake_case__ ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 if i % args.logging_steps == 0: lowerCAmelCase__ = jax_utils.unreplicate(state.step ) lowerCAmelCase__ = running_loss.item() / i lowerCAmelCase__ = self.scheduler_fn(state_step - 1 ) lowerCAmelCase__ = self.evaluate(snake_case__ , snake_case__ ) lowerCAmelCase__ = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(snake_case__ ) ) self.logger.log(snake_case__ , commit=snake_case__ ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Optional[Any] , snake_case__ : int ): lowerCAmelCase__ = get_batched_dataset(snake_case__ , self.args.batch_size ) lowerCAmelCase__ = len(snake_case__ ) // self.args.batch_size lowerCAmelCase__ = jnp.array(0 , dtype=jnp.floataa ) lowerCAmelCase__ = 0 for batch in tqdm(snake_case__ , total=snake_case__ , desc="""Evaluating ... """ ): lowerCAmelCase__ = self.data_collator(snake_case__ ) lowerCAmelCase__ = self.val_step_fn(snake_case__ , **snake_case__ ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 return running_loss / i def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Any , snake_case__ : Any ): lowerCAmelCase__ = jax_utils.unreplicate(snake_case__ ) print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=""" ... """ ) self.model_save_fn(snake_case__ , params=state.params ) with open(os.path.join(snake_case__ , """opt_state.msgpack""" ) , """wb""" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(snake_case__ , """args.joblib""" ) ) joblib.dump(self.data_collator , os.path.join(snake_case__ , """data_collator.joblib""" ) ) with open(os.path.join(snake_case__ , """training_state.json""" ) , """w""" ) as f: json.dump({"""step""": state.step.item()} , snake_case__ ) print("""DONE""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" print(f"""RESTORING CHECKPOINT FROM {save_dir}""" , end=""" ... 
""" ) with open(os.path.join(lowerCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f: lowerCAmelCase__ = from_bytes(state.params , f.read() ) with open(os.path.join(lowerCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f: lowerCAmelCase__ = from_bytes(state.opt_state , f.read() ) lowerCAmelCase__ = joblib.load(os.path.join(lowerCamelCase__ , """args.joblib""" ) ) lowerCAmelCase__ = joblib.load(os.path.join(lowerCamelCase__ , """data_collator.joblib""" ) ) with open(os.path.join(lowerCamelCase__ , """training_state.json""" ) , """r""" ) as f: lowerCAmelCase__ = json.load(lowerCamelCase__ ) lowerCAmelCase__ = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = num_train_steps - warmup_steps lowerCAmelCase__ = optax.linear_schedule(init_value=lowerCamelCase__ , end_value=lowerCamelCase__ , transition_steps=lowerCamelCase__ ) lowerCAmelCase__ = optax.linear_schedule(init_value=lowerCamelCase__ , end_value=1e-7 , transition_steps=lowerCamelCase__ ) lowerCAmelCase__ = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" def weight_decay_mask(lowerCamelCase__ ): lowerCAmelCase__ = traverse_util.flatten_dict(lowerCamelCase__ ) lowerCAmelCase__ = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(lowerCamelCase__ ) lowerCAmelCase__ = scheduler_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = optax.adamw(learning_rate=lowerCamelCase__ , weight_decay=lowerCamelCase__ , mask=lowerCamelCase__ ) return tx, lr
674
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 __lowerCAmelCase : Any = { # 1536-bit 5: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 2048-bit 14: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 3072-bit 15: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 4096-bit 16: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 6144-bit 17: { "prime": int( 
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 8192-bit 18: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + 
"3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, } class a_ : def __init__( self : List[str] , snake_case__ : int = 14 ): if group not in primes: raise ValueError("""Unsupported Group""" ) lowerCAmelCase__ = primes[group]["""prime"""] lowerCAmelCase__ = primes[group]["""generator"""] lowerCAmelCase__ = int(hexlify(urandom(32 ) ) , base=16 ) def _SCREAMING_SNAKE_CASE ( self : Any ): return hex(self.__private_key )[2:] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = pow(self.generator , self.__private_key , self.prime ) return hex(snake_case__ )[2:] def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str ): lowerCAmelCase__ = int(snake_case__ , base=16 ) if not self.is_valid_public_key(snake_case__ ): raise ValueError("""Invalid public key""" ) lowerCAmelCase__ = pow(snake_case__ , self.__private_key , self.prime ) return shaaaa(str(snake_case__ ).encode() ).hexdigest() @staticmethod def _SCREAMING_SNAKE_CASE ( snake_case__ : int , snake_case__ : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1 ) @staticmethod def _SCREAMING_SNAKE_CASE ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ): lowerCAmelCase__ = int(snake_case__ , base=16 ) lowerCAmelCase__ = int(snake_case__ , base=16 ) lowerCAmelCase__ = primes[group]["""prime"""] if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ): raise ValueError("""Invalid public key""" ) lowerCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ ) return shaaaa(str(snake_case__ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
674
1
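# A minimal sketch of the key-exchange math behind the Diffie-Hellman class
# above. The toy parameters are assumed here purely for readability; real
# exchanges use the RFC 3526 groups from the ``primes`` table.
toy_prime, toy_generator = 23, 5  # NOT secure, illustration only
alice_public = pow(toy_generator, 6, toy_prime)  # Alice's private key is 6
bob_public = pow(toy_generator, 15, toy_prime)  # Bob's private key is 15
# Both sides derive the same shared secret, g**(6 * 15) mod p:
assert pow(bob_public, 6, toy_prime) == pow(alice_public, 15, toy_prime)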
"""simple docstring""" import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(lowerCamelCase__ , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors lowerCAmelCase__ = load_file(lowerCamelCase__ ) lowerCAmelCase__ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: lowerCAmelCase__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" ) lowerCAmelCase__ = pipeline.text_encoder else: lowerCAmelCase__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" ) lowerCAmelCase__ = pipeline.unet # find the target layer lowerCAmelCase__ = layer_infos.pop(0 ) while len(lowerCamelCase__ ) > -1: try: lowerCAmelCase__ = curr_layer.__getattr__(lowerCamelCase__ ) if len(lowerCamelCase__ ) > 0: lowerCAmelCase__ = layer_infos.pop(0 ) elif len(lowerCamelCase__ ) == 0: break except Exception: if len(lowerCamelCase__ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: lowerCAmelCase__ = layer_infos.pop(0 ) lowerCAmelCase__ = [] if "lora_down" in key: pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) ) pair_keys.append(lowerCamelCase__ ) else: pair_keys.append(lowerCamelCase__ ) pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: lowerCAmelCase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) lowerCAmelCase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowerCamelCase__ , lowerCamelCase__ ).unsqueeze(2 ).unsqueeze(3 ) else: lowerCAmelCase__ = state_dict[pair_keys[0]].to(torch.floataa ) lowerCAmelCase__ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(lowerCamelCase__ , lowerCamelCase__ ) # update visited list for item in pair_keys: visited.append(lowerCamelCase__ ) return pipeline if __name__ == "__main__": __lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument( "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format." ) parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors" ) parser.add_argument( "--lora_prefix_text_encoder", default="lora_te", type=str, help="The prefix of text encoder weight in safetensors", ) parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW") parser.add_argument( "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not." ) parser.add_argument("--device", type=str, help="Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)") __lowerCAmelCase : Any = parser.parse_args() __lowerCAmelCase : str = args.base_model_path __lowerCAmelCase : List[str] = args.checkpoint_path __lowerCAmelCase : int = args.dump_path __lowerCAmelCase : Optional[int] = args.lora_prefix_unet __lowerCAmelCase : Union[str, Any] = args.lora_prefix_text_encoder __lowerCAmelCase : Dict = args.alpha __lowerCAmelCase : Optional[int] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) __lowerCAmelCase : str = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
674
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ): """simple docstring""" assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match""" lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match""" lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = np.asarray(weights[0] ) lowerCAmelCase__ = np.asarray(weights[1] ) lowerCAmelCase__ = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = np.asarray(weights[0] ) lowerCAmelCase__ = np.asarray(weights[1] ) lowerCAmelCase__ = np.asarray(weights[2] ) lowerCAmelCase__ = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = weights[0][0][0] lowerCAmelCase__ = np.asarray(layer_norm_a[0] ) lowerCAmelCase__ = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # lsh weights + output lowerCAmelCase__ = weights[0][1] if len(lowerCamelCase__ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ ) else: set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ ) # intermediate weighs lowerCAmelCase__ = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase__ ) == 4: lowerCAmelCase__ = intermediate_weights[2] # layernorm 2 lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] ) lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # intermediate dense lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] ) lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) # intermediate out lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] 
) lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = torch_model.reformer # word embeds lowerCAmelCase__ = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , ) if isinstance(weights[3] , lowerCamelCase__ ): lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f"""{position_embeddings[emb_idx]} emb does not match""" lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) ) lowerCAmelCase__ = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # output layer norm lowerCAmelCase__ = np.asarray(weights[7][0] ) lowerCAmelCase__ = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # output embeddings lowerCAmelCase__ = np.asarray(weights[9][0] ) lowerCAmelCase__ = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = ReformerConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ = ReformerModelWithLMHead(lowerCamelCase__ ) with open(lowerCamelCase__ , """rb""" ) as f: lowerCAmelCase__ = pickle.load(lowerCamelCase__ )["""weights"""] set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
674
1
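# A minimal sketch of the weight update at the heart of the LoRA merge script
# above: the low-rank pair reconstructs a full-rank delta, scaled by alpha.
# The tiny shapes are assumed here only for illustration.
import torch

base_weight = torch.zeros(4, 4)  # stand-in for a frozen base weight
lora_up = torch.randn(4, 2)  # "lora_up.weight", rank 2
lora_down = torch.randn(2, 4)  # "lora_down.weight", rank 2
alpha = 0.75
base_weight += alpha * torch.mm(lora_up, lora_down)  # same update as the script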
"""simple docstring""" from __future__ import annotations from typing import Generic, TypeVar __lowerCAmelCase : int = TypeVar("T") class a_ ( Generic[T] ): def __init__( self : List[Any] , snake_case__ : T ): lowerCAmelCase__ = data lowerCAmelCase__ = self lowerCAmelCase__ = 0 class a_ ( Generic[T] ): def __init__( self : Optional[int] ): # map from node name to the node object lowerCAmelCase__ = {} def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : T ): # create a new set with x as its member lowerCAmelCase__ = DisjointSetTreeNode(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : T ): # find the set x belongs to (with path-compression) lowerCAmelCase__ = self.map[data] if elem_ref != elem_ref.parent: lowerCAmelCase__ = self.find_set(elem_ref.parent.data ) return elem_ref.parent def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : DisjointSetTreeNode[T] , snake_case__ : DisjointSetTreeNode[T] ): # helper function for union operation if nodea.rank > nodea.rank: lowerCAmelCase__ = nodea else: lowerCAmelCase__ = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : T , snake_case__ : T ): # merge 2 disjoint sets self.link(self.find_set(snake_case__ ) , self.find_set(snake_case__ ) ) class a_ ( Generic[T] ): def __init__( self : List[Any] ): # connections: map from the node to the neighbouring nodes (with weights) lowerCAmelCase__ = {} def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : T ): # add a node ONLY if its not present in the graph if node not in self.connections: lowerCAmelCase__ = {} def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : T , snake_case__ : T , snake_case__ : int ): # add an edge with the given weight self.add_node(snake_case__ ) self.add_node(snake_case__ ) lowerCAmelCase__ = weight lowerCAmelCase__ = weight def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = [] lowerCAmelCase__ = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda snake_case__ : x[2] ) # creating the disjoint set lowerCAmelCase__ = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(snake_case__ ) # MST generation lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = edges[index] index += 1 lowerCAmelCase__ = disjoint_set.find_set(snake_case__ ) lowerCAmelCase__ = disjoint_set.find_set(snake_case__ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(snake_case__ , snake_case__ , snake_case__ ) disjoint_set.union(snake_case__ , snake_case__ ) return graph
674
"""simple docstring""" import os from math import logaa def _UpperCAmelCase ( lowerCamelCase__ = "base_exp.txt" ): """simple docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) ): lowerCAmelCase__ , lowerCAmelCase__ = list(map(lowerCamelCase__ , line.split(""",""" ) ) ) if x * logaa(lowerCamelCase__ ) > largest: lowerCAmelCase__ = x * logaa(lowerCamelCase__ ) lowerCAmelCase__ = i + 1 return result if __name__ == "__main__": print(solution())
674
1
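# A short usage sketch for the Kruskal implementation above: build a small
# weighted triangle and check that the heavy edge is excluded from the MST.
demo_graph = GraphUndirectedWeighted[int]()
demo_graph.add_edge(1, 2, 1)
demo_graph.add_edge(2, 3, 2)
demo_graph.add_edge(1, 3, 10)  # heavy edge, should not survive
mst = demo_graph.kruskal()
assert 3 not in mst.connections[1]  # only (1, 2) and (2, 3) remain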
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ = 10**9 ): """simple docstring""" lowerCAmelCase__ = 1 lowerCAmelCase__ = 2 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value lowerCAmelCase__ = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(F"{solution() = }")
674
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" while b: lowerCAmelCase__ , lowerCAmelCase__ = b, a % b return a def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return a if b == 0 else euclidean_gcd_recursive(lowerCamelCase__ , a % b ) def _UpperCAmelCase ( ): """simple docstring""" print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" ) print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" ) print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" ) print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" ) print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" ) print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" ) print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" ) print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" ) if __name__ == "__main__": main()
674
1
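# The Euclidean algorithm above terminates because each step strictly shrinks
# the second argument. A worked trace for gcd(48, 18):
#   (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> 6
assert euclidean_gcd(48, 18) == 6
assert euclidean_gcd_recursive(48, 18) == 6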
"""simple docstring""" from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class a_ : UpperCamelCase_ : str = field( metadata={"help": "The output directory where the model will be written."} , ) UpperCamelCase_ : str = field( metadata={ "help": ( "The encoder model checkpoint for weights initialization." "Don't set if you want to train an encoder model from scratch." ) } , ) UpperCamelCase_ : str = field( metadata={ "help": ( "The decoder model checkpoint for weights initialization." "Don't set if you want to train a decoder model from scratch." ) } , ) UpperCamelCase_ : Optional[str] = field( default=__UpperCamelCase , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} ) UpperCamelCase_ : Optional[str] = field( default=__UpperCamelCase , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser((ModelArguments,) ) ((lowerCAmelCase__) , ) = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=lowerCamelCase__ , decoder_config=lowerCamelCase__ , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens lowerCAmelCase__ = decoder_config.decoder_start_token_id lowerCAmelCase__ = decoder_config.pad_token_id if decoder_start_token_id is None: lowerCAmelCase__ = decoder_config.bos_token_id if pad_token_id is None: lowerCAmelCase__ = decoder_config.eos_token_id # This is necessary to make Flax's generate() work lowerCAmelCase__ = decoder_config.eos_token_id lowerCAmelCase__ = decoder_start_token_id lowerCAmelCase__ = pad_token_id lowerCAmelCase__ = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) lowerCAmelCase__ = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
674
"""simple docstring""" import os def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = os.path.dirname(os.path.realpath(lowerCamelCase__ ) ) lowerCAmelCase__ = os.path.join(lowerCamelCase__ , """triangle.txt""" ) with open(lowerCamelCase__ ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = [] for line in triangle: lowerCAmelCase__ = [] for number in line.strip().split(""" """ ): numbers_from_line.append(int(lowerCamelCase__ ) ) a.append(lowerCamelCase__ ) for i in range(1 , len(lowerCamelCase__ ) ): for j in range(len(a[i] ) ): lowerCAmelCase__ = a[i - 1][j] if j != len(a[i - 1] ) else 0 lowerCAmelCase__ = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(lowerCamelCase__ , lowerCamelCase__ ) return max(a[-1] ) if __name__ == "__main__": print(solution())
674
1
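# The same bottom-up recurrence as the triangle solution above, run on the
# four-row example triangle from Project Euler problem 18's statement:
demo = [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]
for i in range(1, len(demo)):
    for j in range(len(demo[i])):
        left = demo[i - 1][j - 1] if j > 0 else 0
        right = demo[i - 1][j] if j != len(demo[i - 1]) else 0
        demo[i][j] += max(left, right)
assert max(demo[-1]) == 23  # best path: 3 + 7 + 4 + 9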
"""simple docstring""" __lowerCAmelCase : dict[tuple[int, int, int], int] = {} def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on lowerCAmelCase__ = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one lowerCAmelCase__ = _calculate(days - 1 , lowerCamelCase__ , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 lowerCAmelCase__ = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter lowerCAmelCase__ = _calculate(days - 1 , lowerCamelCase__ , 0 ) lowerCAmelCase__ = state_late + state_absent + state_ontime lowerCAmelCase__ = prizestrings return prizestrings def _UpperCAmelCase ( lowerCamelCase__ = 30 ): """simple docstring""" return _calculate(lowerCamelCase__ , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
674
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __lowerCAmelCase : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: __lowerCAmelCase : Optional[int] = json.load(f) @require_torch class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict ): return FSMTTokenizer.from_pretrained(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ): lowerCAmelCase__ = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Any , snake_case__ : int ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowerCAmelCase__ = F"""facebook/wmt19-{pair}""" lowerCAmelCase__ = self.get_tokenizer(snake_case__ ) lowerCAmelCase__ = self.get_model(snake_case__ ) lowerCAmelCase__ = bleu_data[pair]["""src"""] lowerCAmelCase__ = bleu_data[pair]["""tgt"""] lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""pt""" , truncation=snake_case__ , padding="""longest""" ).to(snake_case__ ) lowerCAmelCase__ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowerCAmelCase__ = tokenizer.batch_decode( snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) lowerCAmelCase__ = calculate_bleu(snake_case__ , snake_case__ ) print(snake_case__ ) self.assertGreaterEqual(scores["""bleu"""] , snake_case__ )
674
1
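# A brute-force cross-check of the memoised prize-string count above, feasible
# only for a handful of days. Per the rules encoded in ``_calculate``, a string
# fails on two absences in total or three consecutive late days.
from itertools import product

def prize_strings_brute(days: int) -> int:
    total = 0
    for s in map("".join, product("OLA", repeat=days)):
        if s.count("A") < 2 and "LLL" not in s:
            total += 1
    return total

assert prize_strings_brute(4) == _calculate(4, absent=0, late=0)  # both give 43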
"""simple docstring""" from __future__ import annotations from math import gcd def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ): """simple docstring""" if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: return (pow(lowerCamelCase__ , 2 ) + step) % modulus for _ in range(lowerCamelCase__ ): # These track the position within the cycle detection logic. lowerCAmelCase__ = seed lowerCAmelCase__ = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowerCAmelCase__ = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) __lowerCAmelCase : List[str] = parser.parse_args() __lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"{args.num} is probably prime") else: __lowerCAmelCase : List[str] = args.num // divisor print(F"{args.num} = {divisor} * {quotient}")
674
"""simple docstring""" import pprint import requests __lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api" def _UpperCAmelCase ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/today""" ).json() def _UpperCAmelCase ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = random_quotes() pprint.pprint(response)
674
1
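# A quick usage check for ``pollard_rho`` above: 8051 = 83 * 97 is the classic
# textbook semiprime for this method, and the default seed/step recover one of
# its factors within a few Floyd iterations.
factor = pollard_rho(8051)
assert factor is not None and factor in (83, 97)
assert 8051 % factor == 0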
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __lowerCAmelCase : Optional[Any] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ): """simple docstring""" if rng is None: lowerCAmelCase__ = random.Random() lowerCAmelCase__ = 1 for dim in shape: total_dims *= dim lowerCAmelCase__ = [] for _ in range(lowerCamelCase__ ): values.append(rng.randint(0 , vocab_size - 1 ) ) lowerCAmelCase__ = np.array(lowerCamelCase__ , dtype=jnp.intaa ).reshape(lowerCamelCase__ ) return output def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=None ): """simple docstring""" lowerCAmelCase__ = ids_tensor(lowerCamelCase__ , vocab_size=2 , rng=lowerCamelCase__ ) # make sure that at least one token is attended to for each batch lowerCAmelCase__ = 1 return attn_mask @require_flax class a_ : UpperCamelCase_ : List[str] = None UpperCamelCase_ : Any = () def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 lowerCAmelCase__ = 2 lowerCAmelCase__ = inputs["""input_ids"""].shape[-1] // 2 lowerCAmelCase__ = inputs["""input_ids"""][:max_batch_size, :sequence_length] lowerCAmelCase__ = jnp.ones_like(snake_case__ ) lowerCAmelCase__ = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens lowerCAmelCase__ = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` lowerCAmelCase__ = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() lowerCAmelCase__ = False lowerCAmelCase__ = max_length lowerCAmelCase__ = 0 for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCAmelCase__ = getattr(snake_case__ , snake_case__ ) lowerCAmelCase__ = pt_model_class(snake_case__ ).eval() lowerCAmelCase__ = load_flax_weights_in_pytorch_model(snake_case__ , flax_model.params ) lowerCAmelCase__ = flax_model.generate(snake_case__ ).sequences lowerCAmelCase__ = pt_model.generate(torch.tensor(snake_case__ , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: lowerCAmelCase__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() lowerCAmelCase__ = False lowerCAmelCase__ = max_length for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.generate(snake_case__ ).sequences 
self.assertEqual(generation_outputs.shape[-1] , snake_case__ ) lowerCAmelCase__ = jit(model.generate ) lowerCAmelCase__ = jit_generate(snake_case__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() lowerCAmelCase__ = True lowerCAmelCase__ = max_length for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.generate(snake_case__ ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case__ ) lowerCAmelCase__ = jit(model.generate ) lowerCAmelCase__ = jit_generate(snake_case__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() lowerCAmelCase__ = False lowerCAmelCase__ = max_length lowerCAmelCase__ = 2 for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.generate(snake_case__ ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case__ ) lowerCAmelCase__ = jit(model.generate ) lowerCAmelCase__ = jit_generate(snake_case__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() lowerCAmelCase__ = False lowerCAmelCase__ = max_length lowerCAmelCase__ = 2 lowerCAmelCase__ = 2 for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.generate(snake_case__ ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() lowerCAmelCase__ = True lowerCAmelCase__ = max_length lowerCAmelCase__ = 0.8 lowerCAmelCase__ = 10 lowerCAmelCase__ = 0.3 lowerCAmelCase__ = 1 lowerCAmelCase__ = 8 lowerCAmelCase__ = 9 for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.generate(snake_case__ ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case__ ) lowerCAmelCase__ = jit(model.generate ) lowerCAmelCase__ = jit_generate(snake_case__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() lowerCAmelCase__ = max_length lowerCAmelCase__ = 1 lowerCAmelCase__ = 8 lowerCAmelCase__ = 9 for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.generate(snake_case__ ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case__ ) lowerCAmelCase__ = jit(model.generate ) lowerCAmelCase__ = jit_generate(snake_case__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() 
lowerCAmelCase__ = max_length lowerCAmelCase__ = 2 lowerCAmelCase__ = 1 lowerCAmelCase__ = 8 lowerCAmelCase__ = 9 for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.generate(snake_case__ ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case__ ) lowerCAmelCase__ = jit(model.generate ) lowerCAmelCase__ = jit_generate(snake_case__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() # pad attention mask on the left lowerCAmelCase__ = attention_mask.at[(0, 0)].set(0 ) lowerCAmelCase__ = False lowerCAmelCase__ = max_length for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case__ ) lowerCAmelCase__ = jit(model.generate ) lowerCAmelCase__ = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() # pad attention mask on the left lowerCAmelCase__ = attention_mask.at[(0, 0)].set(0 ) lowerCAmelCase__ = True lowerCAmelCase__ = max_length for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case__ ) lowerCAmelCase__ = jit(model.generate ) lowerCAmelCase__ = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_input_ids_and_config() # pad attention mask on the left lowerCAmelCase__ = attention_mask.at[(0, 0)].set(0 ) lowerCAmelCase__ = 2 lowerCAmelCase__ = max_length for model_class in self.all_generative_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.generate(snake_case__ , attention_mask=snake_case__ ).sequences self.assertEqual(generation_outputs.shape[-1] , snake_case__ ) lowerCAmelCase__ = jit(model.generate ) lowerCAmelCase__ = jit_generate(snake_case__ , attention_mask=snake_case__ ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" ) lowerCAmelCase__ = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" ) lowerCAmelCase__ = """Hello world""" lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""np""" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(snake_case__ , """do_samples""" ): model.generate(snake_case__ , do_samples=snake_case__ ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(snake_case__ , """foo""" ): lowerCAmelCase__ = 
{"""foo""": """bar"""} model.generate(snake_case__ , **snake_case__ )
674
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = 0 def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = CLIPConfig() # Create a dummy config file with image_proceesor_type lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict() config_dict.pop("""image_processor_type""" ) lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ ) # save in new folder model_config.save_pretrained(snake_case__ ) config.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) # make sure private variable is not incorrectly saved lowerCAmelCase__ = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": 
"""CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): with self.assertRaisesRegex( snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): with self.assertRaisesRegex( snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): with self.assertRaisesRegex( snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(snake_case__ ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case__ ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def _SCREAMING_SNAKE_CASE ( self : Dict ): try: AutoConfig.register("""custom""" , snake_case__ ) AutoImageProcessor.register(snake_case__ , snake_case__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoImageProcessor.register(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _SCREAMING_SNAKE_CASE ( self : List[str] ): class a_ ( __UpperCamelCase ): UpperCamelCase_ : Tuple = True try: AutoConfig.register("""custom""" , snake_case__ ) AutoImageProcessor.register(snake_case__ , snake_case__ ) # If remote code is not set, the default is to use local lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(snake_case__ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
674
1
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class a_ ( unittest.TestCase ): def __init__( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Tuple=7 , snake_case__ : str=3 , snake_case__ : int=30 , snake_case__ : Dict=400 , snake_case__ : Optional[Any]=True , snake_case__ : int=None , snake_case__ : Any=True , snake_case__ : str=1 / 255 , snake_case__ : Tuple=True , snake_case__ : int=[0.5, 0.5, 0.5] , snake_case__ : List[str]=[0.5, 0.5, 0.5] , snake_case__ : Optional[int]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowerCAmelCase__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333} lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = min_resolution lowerCAmelCase__ = max_resolution lowerCAmelCase__ = do_resize lowerCAmelCase__ = size lowerCAmelCase__ = do_rescale lowerCAmelCase__ = rescale_factor lowerCAmelCase__ = do_normalize lowerCAmelCase__ = image_mean lowerCAmelCase__ = image_std lowerCAmelCase__ = do_pad def _SCREAMING_SNAKE_CASE ( self : Dict ): return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Union[str, Any] , snake_case__ : Dict=False ): if not batched: lowerCAmelCase__ = image_inputs[0] if isinstance(snake_case__ , Image.Image ): lowerCAmelCase__ , lowerCAmelCase__ = image.size else: lowerCAmelCase__ , lowerCAmelCase__ = image.shape[1], image.shape[2] if w < h: lowerCAmelCase__ = int(self.size["""shortest_edge"""] * h / w ) lowerCAmelCase__ = self.size["""shortest_edge"""] elif w > h: lowerCAmelCase__ = self.size["""shortest_edge"""] lowerCAmelCase__ = int(self.size["""shortest_edge"""] * w / h ) else: lowerCAmelCase__ = self.size["""shortest_edge"""] lowerCAmelCase__ = self.size["""shortest_edge"""] else: lowerCAmelCase__ = [] for image in image_inputs: lowerCAmelCase__ , lowerCAmelCase__ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase__ = max(snake_case__ , key=lambda snake_case__ : item[0] )[0] lowerCAmelCase__ = max(snake_case__ , key=lambda snake_case__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : int = DetrImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = DetrImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : str ): return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , """image_mean""" ) ) self.assertTrue(hasattr(snake_case__ , """image_std""" ) ) self.assertTrue(hasattr(snake_case__ , """do_normalize""" ) ) 
self.assertTrue(hasattr(snake_case__ , """do_rescale""" ) ) self.assertTrue(hasattr(snake_case__ , """rescale_factor""" ) ) self.assertTrue(hasattr(snake_case__ , """do_resize""" ) ) self.assertTrue(hasattr(snake_case__ , """size""" ) ) self.assertTrue(hasattr(snake_case__ , """do_pad""" ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} ) self.assertEqual(image_processor.do_pad , snake_case__ ) lowerCAmelCase__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=snake_case__ ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): pass def _SCREAMING_SNAKE_CASE ( self : Dict ): # Initialize image_processing lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ ) lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # Initialize image_processing lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) # Test not batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self : int ): # Initialize image_processing lowerCAmelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not 
batched input lowerCAmelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ = image_processing(snake_case__ , return_tensors="""pt""" ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): # prepare image and target lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: lowerCAmelCase__ = json.loads(f.read() ) lowerCAmelCase__ = {"""image_id""": 39769, """annotations""": target} # encode them lowerCAmelCase__ = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" ) lowerCAmelCase__ = image_processing(images=snake_case__ , annotations=snake_case__ , return_tensors="""pt""" ) # verify pixel values lowerCAmelCase__ = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , snake_case__ ) lowerCAmelCase__ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , snake_case__ , atol=1E-4 ) ) # verify area lowerCAmelCase__ = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , snake_case__ ) ) # verify boxes lowerCAmelCase__ = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , snake_case__ ) lowerCAmelCase__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , snake_case__ , atol=1E-3 ) ) # verify image_id lowerCAmelCase__ = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , snake_case__ ) ) # verify is_crowd lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , snake_case__ ) ) # verify class_labels lowerCAmelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , snake_case__ ) ) # verify orig_size lowerCAmelCase__ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , snake_case__ ) ) # verify size lowerCAmelCase__ = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , snake_case__ ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ): # prepare image, target and masks_path lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: lowerCAmelCase__ = json.loads(f.read() ) lowerCAmelCase__ = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target} lowerCAmelCase__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them lowerCAmelCase__ = 
DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" ) lowerCAmelCase__ = image_processing(images=snake_case__ , annotations=snake_case__ , masks_path=snake_case__ , return_tensors="""pt""" ) # verify pixel values lowerCAmelCase__ = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["""pixel_values"""].shape , snake_case__ ) lowerCAmelCase__ = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , snake_case__ , atol=1E-4 ) ) # verify area lowerCAmelCase__ = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , snake_case__ ) ) # verify boxes lowerCAmelCase__ = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , snake_case__ ) lowerCAmelCase__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , snake_case__ , atol=1E-3 ) ) # verify image_id lowerCAmelCase__ = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , snake_case__ ) ) # verify is_crowd lowerCAmelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , snake_case__ ) ) # verify class_labels lowerCAmelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , snake_case__ ) ) # verify masks lowerCAmelCase__ = 822873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , snake_case__ ) # verify orig_size lowerCAmelCase__ = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , snake_case__ ) ) # verify size lowerCAmelCase__ = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , snake_case__ ) )
674
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a_ : def __init__( self : Optional[int] , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=64 , snake_case__ : Any=None ): lowerCAmelCase__ = np.random.default_rng(snake_case__ ) lowerCAmelCase__ = length lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Optional[Any] ): return self.length def __getitem__( self : List[str] , snake_case__ : Optional[int] ): return {"x": self.x[i], "y": self.y[i]} class a_ ( torch.nn.Module ): def __init__( self : List[str] , snake_case__ : str=0 , snake_case__ : Dict=0 , snake_case__ : Any=False ): super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = True def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Any=None ): if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a[0] + self.b[0] class a_ ( torch.nn.Module ): def __init__( self : Any , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=0 , snake_case__ : List[Any]=False ): super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) lowerCAmelCase__ = True def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any]=None ): if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a + self.b def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 16 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase__ = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""} lowerCAmelCase__ = load_dataset("""csv""" , data_files=lowerCamelCase__ ) lowerCAmelCase__ = datasets["""train"""].unique("""label""" ) lowerCAmelCase__ = {v: i for i, v in enumerate(lowerCamelCase__ )} def tokenize_function(lowerCamelCase__ ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="""max_length""" ) if "label" in examples: lowerCAmelCase__ = [label_to_id[l] for l in examples["""label"""]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(lowerCamelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowerCAmelCase__ = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=2 ) lowerCAmelCase__ = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
674
1
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if discount_rate < 0: raise ValueError("""Discount rate cannot be negative""" ) if not cash_flows: raise ValueError("""Cash flows list cannot be empty""" ) lowerCAmelCase__ = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCamelCase__ ) ) return round(lowerCamelCase__ , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
674
"""simple docstring""" import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = MobileBertConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ = MobileBertForPreTraining(lowerCamelCase__ ) # Load weights from tf checkpoint lowerCAmelCase__ = load_tf_weights_in_mobilebert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : Optional[int] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
674
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Dict = StableDiffusionInstructPixaPixPipeline UpperCamelCase_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"} UpperCamelCase_ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCamelCase_ : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS UpperCamelCase_ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS def _SCREAMING_SNAKE_CASE ( self : int ): torch.manual_seed(0 ) lowerCAmelCase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=snake_case__ ) torch.manual_seed(0 ) lowerCAmelCase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowerCAmelCase__ = CLIPTextModel(snake_case__ ) lowerCAmelCase__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCAmelCase__ = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] , snake_case__ : List[str]=0 ): lowerCAmelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase__ = Image.fromarray(np.uinta(snake_case__ ) ).convert("""RGB""" ) if str(snake_case__ ).startswith("""mps""" ): lowerCAmelCase__ = torch.manual_seed(snake_case__ ) else: lowerCAmelCase__ = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) lowerCAmelCase__ = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """image_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = """cpu""" # ensure determinism for the device-dependent 
torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) lowerCAmelCase__ = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase__ = self.get_dummy_inputs(snake_case__ ) lowerCAmelCase__ = sd_pipe(**snake_case__ ).images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) lowerCAmelCase__ = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase__ = self.get_dummy_inputs(snake_case__ ) lowerCAmelCase__ = """french fries""" lowerCAmelCase__ = sd_pipe(**snake_case__ , negative_prompt=snake_case__ ) lowerCAmelCase__ = output.images lowerCAmelCase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) lowerCAmelCase__ = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase__ = self.get_dummy_inputs(snake_case__ ) lowerCAmelCase__ = [inputs["""prompt"""]] * 2 lowerCAmelCase__ = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0 lowerCAmelCase__ = torch.from_numpy(snake_case__ ).unsqueeze(0 ).to(snake_case__ ) lowerCAmelCase__ = image / 2 + 0.5 lowerCAmelCase__ = image.permute(0 , 3 , 1 , 2 ) lowerCAmelCase__ = image.repeat(2 , 1 , 1 , 1 ) lowerCAmelCase__ = sd_pipe(**snake_case__ ).images lowerCAmelCase__ = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) lowerCAmelCase__ = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" ) lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) lowerCAmelCase__ = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase__ = self.get_dummy_inputs(snake_case__ ) lowerCAmelCase__ = sd_pipe(**snake_case__ ).images lowerCAmelCase__ = image[0, -3:, -3:, -1] lowerCAmelCase__ = [round(snake_case__ , 4 ) for x in image_slice.flatten().tolist()] print(""",""".join([str(snake_case__ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) lowerCAmelCase__ = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Dict ): 
super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = self.get_dummy_components() lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) lowerCAmelCase__ = VaeImageProcessor(do_resize=snake_case__ , do_normalize=snake_case__ ) lowerCAmelCase__ = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase__ = pipe(**self.get_dummy_inputs_by_type(snake_case__ , input_image_type="""pt""" ) )[0] lowerCAmelCase__ = components["""vae"""] lowerCAmelCase__ = self.get_dummy_inputs_by_type(snake_case__ , input_image_type="""pt""" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): lowerCAmelCase__ = vae.encode(inputs[image_param] ).latent_dist.mode() lowerCAmelCase__ = pipe(**snake_case__ )[0] lowerCAmelCase__ = np.abs(out - out_latents_inputs ).max() self.assertLess(snake_case__ , 1E-4 , """passing latents as image input generate different result from passing image""" ) @slow @require_torch_gpu class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : List[str] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Any=0 ): lowerCAmelCase__ = torch.manual_seed(snake_case__ ) lowerCAmelCase__ = load_image( """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" ) lowerCAmelCase__ = { """prompt""": """turn him into a cyborg""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """image_guidance_scale""": 1.0, """output_type""": """numpy""", } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() lowerCAmelCase__ = self.get_inputs() lowerCAmelCase__ = pipe(**snake_case__ ).images lowerCAmelCase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=snake_case__ ) lowerCAmelCase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() lowerCAmelCase__ = self.get_inputs() lowerCAmelCase__ = pipe(**snake_case__ ).images lowerCAmelCase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=snake_case__ ) lowerCAmelCase__ = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() lowerCAmelCase__ = self.get_inputs() lowerCAmelCase__ = pipe(**snake_case__ ).images lowerCAmelCase__ = image[0, -3:, -3:, -1].flatten() 
assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = 0 def callback_fn(snake_case__ : int , snake_case__ : int , snake_case__ : torch.FloatTensor ) -> None: lowerCAmelCase__ = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowerCAmelCase__ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowerCAmelCase__ = latents[0, -3:, -3:, -1] lowerCAmelCase__ = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: lowerCAmelCase__ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowerCAmelCase__ = latents[0, -3:, -3:, -1] lowerCAmelCase__ = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 lowerCAmelCase__ = False lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=snake_case__ , torch_dtype=torch.floataa ) lowerCAmelCase__ = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() lowerCAmelCase__ = self.get_inputs() pipe(**snake_case__ , callback=snake_case__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _SCREAMING_SNAKE_CASE ( self : Any ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( """timbrooks/instruct-pix2pix""" , safety_checker=snake_case__ , torch_dtype=torch.floataa ) lowerCAmelCase__ = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCAmelCase__ = self.get_inputs() lowerCAmelCase__ = pipe(**snake_case__ ) lowerCAmelCase__ = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 lowerCAmelCase__ = inputs["""image"""].resize((504, 504) ) lowerCAmelCase__ = """timbrooks/instruct-pix2pix""" lowerCAmelCase__ = StableDiffusionInstructPixaPixPipeline.from_pretrained( snake_case__ , safety_checker=snake_case__ , ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() lowerCAmelCase__ = pipe(**snake_case__ ) lowerCAmelCase__ = output.images[0] lowerCAmelCase__ = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) lowerCAmelCase__ = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
674
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: lowerCAmelCase__ = f"""The input value of [n={number}] has to be > 0""" raise ValueError(lowerCamelCase__ ) else: lowerCAmelCase__ = sylvester(number - 1 ) lowerCAmelCase__ = num - 1 lowerCAmelCase__ = num return lower * upper + 1 if __name__ == "__main__": print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
674
1
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if curr_ind == len(lowerCamelCase__ ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(lowerCamelCase__ ) ): if valid_connection(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # Insert current vertex into path as next transition lowerCAmelCase__ = next_ver # Validate created path if util_hamilton_cycle(lowerCamelCase__ , lowerCamelCase__ , curr_ind + 1 ): return True # Backtrack lowerCAmelCase__ = -1 return False def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 0 ): """simple docstring""" lowerCAmelCase__ = [-1] * (len(lowerCamelCase__ ) + 1) # initialize start and end of path with starting index lowerCAmelCase__ = lowerCAmelCase__ = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(lowerCamelCase__ , lowerCamelCase__ , 1 ) else []
674
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Tuple = PegasusTokenizer UpperCamelCase_ : Any = PegasusTokenizerFast UpperCamelCase_ : int = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return PegasusTokenizer.from_pretrained("""google/pegasus-large""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = """</s>""" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """</s>""" ) self.assertEqual(vocab_keys[-1] , """v""" ) self.assertEqual(len(snake_case__ ) , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ = 
"""To ensure a smooth flow of bank resolutions.""" lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : str ): # fmt: off lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = PegasusTokenizer UpperCamelCase_ : Optional[int] = PegasusTokenizerFast UpperCamelCase_ : Union[str, Any] = True UpperCamelCase_ : Optional[int] = True def _SCREAMING_SNAKE_CASE ( 
self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ): return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids self.assertListEqual( snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
674
1
"""simple docstring""" __lowerCAmelCase : List[Any] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" __lowerCAmelCase : Optional[int] = [{"type": "code", "content": INSTALL_CONTENT}] __lowerCAmelCase : str = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
674
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : int ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , ) assert hasattr(self , """env""" ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ): # configuration for running training on smdistributed Model Parallel lowerCAmelCase__ = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase__ = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ): TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ): # create estimator lowerCAmelCase__ = self.create_estimator(snake_case__ ) # run training estimator.fit() # result dataframe lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] 
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
674
1
"""simple docstring""" from typing import List import numpy as np def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = {key: len(lowerCamelCase__ ) for key, value in gen_kwargs.items() if isinstance(lowerCamelCase__ , lowerCamelCase__ )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( """Sharding is ambiguous for this dataset: """ + """we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n""" + """\n""".join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() ) + """\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, """ + """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.""" ) ) lowerCAmelCase__ = max(lists_lengths.values() , default=0 ) return max(1 , lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = [] for group_idx in range(lowerCamelCase__ ): lowerCAmelCase__ = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCAmelCase__ = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCAmelCase__ = range(lowerCamelCase__ , start + num_shards_to_add ) shards_indices_per_group.append(lowerCamelCase__ ) return shards_indices_per_group def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = _number_of_shards_in_gen_kwargs(lowerCamelCase__ ) if num_shards == 1: return [dict(lowerCamelCase__ )] else: lowerCAmelCase__ = _distribute_shards(num_shards=lowerCamelCase__ , max_num_jobs=lowerCamelCase__ ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(lowerCamelCase__ ) ) ] def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , lowerCamelCase__ ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = {len(lowerCamelCase__ ) for value in gen_kwargs.values() if isinstance(lowerCamelCase__ , lowerCamelCase__ )} lowerCAmelCase__ = {} for size in list_sizes: lowerCAmelCase__ = list(range(lowerCamelCase__ ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCAmelCase__ = dict(lowerCamelCase__ ) for key, value in shuffled_kwargs.items(): if isinstance(lowerCamelCase__ , lowerCamelCase__ ): lowerCAmelCase__ = [value[i] for i in indices_per_size[len(lowerCamelCase__ )]] return shuffled_kwargs
674
"""simple docstring""" from math import pi, sqrt def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" if num <= 0: raise ValueError("""math domain error""" ) if num > 1_71.5: raise OverflowError("""math range error""" ) elif num - int(lowerCamelCase__ ) not in (0, 0.5): raise NotImplementedError("""num must be an integer or a half-integer""" ) elif num == 0.5: return sqrt(lowerCamelCase__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _UpperCAmelCase ( ): """simple docstring""" assert gamma(0.5 ) == sqrt(lowerCamelCase__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() __lowerCAmelCase : Dict = 1.0 while num: __lowerCAmelCase : Any = float(input("Gamma of: ")) print(F"gamma({num}) = {gamma(num)}") print("\nEnter 0 to exit...")
674
1
"""simple docstring""" import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class a_ : def __init__( self : Dict , snake_case__ : Dict , snake_case__ : List[Any]=3 , snake_case__ : Any=32 , snake_case__ : List[Any]=3 , snake_case__ : Any=10 , snake_case__ : Optional[int]=[8, 16, 32, 64] , snake_case__ : Optional[int]=[1, 1, 2, 1] , snake_case__ : Any=True , snake_case__ : Union[str, Any]=True , snake_case__ : Dict="relu" , snake_case__ : str=3 , snake_case__ : Tuple=None , snake_case__ : Any=["stage2", "stage3", "stage4"] , snake_case__ : str=[2, 3, 4] , snake_case__ : Optional[Any]=1 , ): lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = embeddings_size lowerCAmelCase__ = hidden_sizes lowerCAmelCase__ = depths lowerCAmelCase__ = is_training lowerCAmelCase__ = use_labels lowerCAmelCase__ = hidden_act lowerCAmelCase__ = num_labels lowerCAmelCase__ = scope lowerCAmelCase__ = len(snake_case__ ) lowerCAmelCase__ = out_features lowerCAmelCase__ = out_indices lowerCAmelCase__ = num_groups def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase__ = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Dict ): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): lowerCAmelCase__ = BitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Optional[Any] ): lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = BitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ): lowerCAmelCase__ = BitBackbone(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase__ = 
model(snake_case__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowerCAmelCase__ = None lowerCAmelCase__ = BitBackbone(config=snake_case__ ) model.to(snake_case__ ) model.eval() lowerCAmelCase__ = model(snake_case__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Optional[int] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () UpperCamelCase_ : List[str] = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) UpperCamelCase_ : str = False UpperCamelCase_ : Union[str, Any] = False UpperCamelCase_ : List[str] = False UpperCamelCase_ : Tuple = False UpperCamelCase_ : Optional[int] = False def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = BitModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : List[str] ): return @unittest.skip(reason="""Bit does not output attentions""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): pass def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] lowerCAmelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_backbone(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(config=snake_case__ ) for name, module in model.named_modules(): if isinstance(snake_case__ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): def check_hidden_states_output(snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : Any ): lowerCAmelCase__ = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): lowerCAmelCase__ = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) lowerCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase__ = self.model_tester.num_stages self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCAmelCase__ = layer_type lowerCAmelCase__ = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def _SCREAMING_SNAKE_CASE ( self : int ): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ = BitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class a_ ( unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case__ ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""pt""" ).to(snake_case__ ) # forward pass with torch.no_grad(): lowerCAmelCase__ = model(**snake_case__ ) # verify the logits lowerCAmelCase__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) lowerCAmelCase__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(snake_case__ ) 
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) ) @require_torch class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Union[str, Any] = (BitBackbone,) if is_torch_available() else () UpperCamelCase_ : Tuple = BitConfig UpperCamelCase_ : Optional[int] = False def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = BitModelTester(self )
674
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a_ : def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ): lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = patch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = is_training lowerCAmelCase__ = use_labels lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = scope lowerCAmelCase__ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCAmelCase__ = (image_size // patch_size) ** 2 lowerCAmelCase__ = num_patches + 2 def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[Any] ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ): lowerCAmelCase__ = TFDeiTModel(config=snake_case__ ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple 
, snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ): lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ ) lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ): lowerCAmelCase__ = self.type_sequence_label_size lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ ) lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ ) lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Optional[Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) UpperCamelCase_ : Any = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Optional[int] = False UpperCamelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = TFDeiTModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): pass def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCAmelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] 
lowerCAmelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ): lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _SCREAMING_SNAKE_CASE ( self : Any ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Any ): return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" ) # forward pass lowerCAmelCase__ = model(**snake_case__ ) # verify the logits lowerCAmelCase__ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
674
1
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __lowerCAmelCase : Optional[Any] = data_utils.TransfoXLTokenizer __lowerCAmelCase : List[Any] = data_utils.TransfoXLCorpus __lowerCAmelCase : List[Any] = data_utils __lowerCAmelCase : Any = data_utils def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(lowerCamelCase__ , """rb""" ) as fp: lowerCAmelCase__ = pickle.load(lowerCamelCase__ , encoding="""latin1""" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowerCAmelCase__ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""] print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" ) lowerCAmelCase__ = corpus.vocab.__dict__ torch.save(lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = corpus.__dict__ corpus_dict_no_vocab.pop("""vocab""" , lowerCamelCase__ ) lowerCAmelCase__ = pytorch_dump_folder_path + """/""" + CORPUS_NAME print(f"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(lowerCamelCase__ , lowerCamelCase__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowerCAmelCase__ = os.path.abspath(lowerCamelCase__ ) lowerCAmelCase__ = os.path.abspath(lowerCamelCase__ ) print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": lowerCAmelCase__ = TransfoXLConfig() else: lowerCAmelCase__ = TransfoXLConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ = TransfoXLLMHeadModel(lowerCamelCase__ ) lowerCAmelCase__ = load_tf_weights_in_transfo_xl(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model lowerCAmelCase__ = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = os.path.join(lowerCamelCase__ , lowerCamelCase__ ) print(f"""Save PyTorch model to {os.path.abspath(lowerCamelCase__ )}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) print(f"""Save configuration file to {os.path.abspath(lowerCamelCase__ )}""" ) with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." 
), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) __lowerCAmelCase : str = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
674
"""simple docstring""" from __future__ import annotations from math import gcd def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ): """simple docstring""" if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: return (pow(lowerCamelCase__ , 2 ) + step) % modulus for _ in range(lowerCamelCase__ ): # These track the position within the cycle detection logic. lowerCAmelCase__ = seed lowerCAmelCase__ = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowerCAmelCase__ = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) __lowerCAmelCase : List[str] = parser.parse_args() __lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"{args.num} is probably prime") else: __lowerCAmelCase : List[str] = args.num // divisor print(F"{args.num} = {divisor} * {quotient}")
674
1
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" , [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) lowerCAmelCase__ = DatasetInfosDict.from_directory(lowerCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""" , [ DatasetInfo(), DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ), ] , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = str(lowerCamelCase__ ) dataset_info.write_to_directory(lowerCamelCase__ ) lowerCAmelCase__ = DatasetInfo.from_directory(lowerCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(lowerCamelCase__ , """dataset_info.json""" ) ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = DatasetInfo( description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) lowerCAmelCase__ = dataset_info._to_yaml_dict() assert sorted(lowerCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) lowerCAmelCase__ = yaml.safe_dump(lowerCamelCase__ ) lowerCAmelCase__ = yaml.safe_load(lowerCamelCase__ ) assert dataset_info_yaml_dict == reloaded def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = DatasetInfo() lowerCAmelCase__ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" , [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42 ), """v2""": DatasetInfo(dataset_size=1337 ), } ), ] , ) def _UpperCAmelCase ( 
lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = str(lowerCamelCase__ ) dataset_infos_dict.write_to_directory(lowerCamelCase__ ) lowerCAmelCase__ = DatasetInfosDict.from_directory(lowerCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): lowerCAmelCase__ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml lowerCAmelCase__ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(lowerCamelCase__ , """README.md""" ) )
674
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = TapasConfig.from_json_file(lowerCamelCase__ ) # set absolute/relative position embeddings parameter lowerCAmelCase__ = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "WTQ": # run_task_main.py hparams lowerCAmelCase__ = 4 lowerCAmelCase__ = True # hparam_utils.py hparams lowerCAmelCase__ = 0.66_46_94 lowerCAmelCase__ = 0.20_79_51 lowerCAmelCase__ = 0.12_11_94 lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = 0.0_35_25_13 lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams lowerCAmelCase__ = 4 lowerCAmelCase__ = False # hparam_utils.py hparams lowerCAmelCase__ = 36.45_19 lowerCAmelCase__ = 0.90_34_21 lowerCAmelCase__ = 2_22.0_88 lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = 0.76_31_41 lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "TABFACT": lowerCAmelCase__ = TapasForSequenceClassification(config=lowerCamelCase__ ) elif task == "MLM": lowerCAmelCase__ = TapasForMaskedLM(config=lowerCamelCase__ ) elif task == "INTERMEDIATE_PRETRAINING": lowerCAmelCase__ = TapasModel(config=lowerCamelCase__ ) else: raise ValueError(f"""Task {task} not supported.""" ) print(f"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model (weights and configuration) print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowerCamelCase__ ) # Save tokenizer files print(f"""Save tokenizer files to {pytorch_dump_path}""" ) lowerCAmelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(lowerCamelCase__ ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA." ) parser.add_argument( "--reset_position_index_per_cell", default=False, action="store_true", help="Whether to use relative position embeddings or not. Defaults to True.", ) parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--tapas_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained TAPAS model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
) __lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
674
1
"""simple docstring""" __lowerCAmelCase : Optional[Any] = { "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/" } # Exclamation mark is not in ITU-R recommendation # fmt: on __lowerCAmelCase : Dict = {value: key for key, value in MORSE_CODE_DICT.items()} def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return "".join(REVERSE_DICT[char] for char in message.split() ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = """Morse code here!""" print(lowerCamelCase__ ) lowerCAmelCase__ = encrypt(lowerCamelCase__ ) print(lowerCamelCase__ ) lowerCAmelCase__ = decrypt(lowerCamelCase__ ) print(lowerCamelCase__ ) if __name__ == "__main__": main()
674
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ = 50 ): """simple docstring""" lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"{solution() = }")
674
1
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class a_ ( __UpperCamelCase ): @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowerCAmelCase__ = """ from transformers import BertConfig, BertModel, BertTokenizer, pipeline """ lowerCAmelCase__ = """ mname = \"hf-internal-testing/tiny-random-bert\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task=\"fill-mask\", model=mname) print(\"success\") """ lowerCAmelCase__ = """ import socket def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\") socket.socket = offline_socket """ # Force fetching the files so that we can use the cache lowerCAmelCase__ = """hf-internal-testing/tiny-random-bert""" BertConfig.from_pretrained(snake_case__ ) BertModel.from_pretrained(snake_case__ ) BertTokenizer.from_pretrained(snake_case__ ) pipeline(task="""fill-mask""" , model=snake_case__ ) # baseline - just load from_pretrained with normal network lowerCAmelCase__ = [sys.executable, """-c""", """\n""".join([load, run, mock] )] # should succeed lowerCAmelCase__ = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase__ = """1""" lowerCAmelCase__ = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowerCAmelCase__ = """ from transformers import BertConfig, BertModel, BertTokenizer, pipeline """ lowerCAmelCase__ = """ mname = \"hf-internal-testing/tiny-random-bert\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task=\"fill-mask\", model=mname) print(\"success\") """ lowerCAmelCase__ = """ import socket def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\") socket.socket = offline_socket """ # Force fetching the files so that we can use the cache lowerCAmelCase__ = """hf-internal-testing/tiny-random-bert""" BertConfig.from_pretrained(snake_case__ ) BertModel.from_pretrained(snake_case__ ) BertTokenizer.from_pretrained(snake_case__ ) pipeline(task="""fill-mask""" , model=snake_case__ ) # baseline - just load from_pretrained with normal network lowerCAmelCase__ = [sys.executable, """-c""", """\n""".join([load, run, mock] )] # should succeed lowerCAmelCase__ = self.get_env() lowerCAmelCase__ = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def _SCREAMING_SNAKE_CASE ( self : int ): # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while 
running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched lowerCAmelCase__ = """ from transformers import BertConfig, BertModel, BertTokenizer """ lowerCAmelCase__ = """ mname = \"hf-internal-testing/tiny-random-bert-sharded\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print(\"success\") """ lowerCAmelCase__ = """ import socket def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\") socket.socket = offline_socket """ # baseline - just load from_pretrained with normal network lowerCAmelCase__ = [sys.executable, """-c""", """\n""".join([load, run] )] # should succeed lowerCAmelCase__ = self.get_env() lowerCAmelCase__ = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) # next emulate no network lowerCAmelCase__ = [sys.executable, """-c""", """\n""".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase__ = """1""" lowerCAmelCase__ = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = """ from transformers import pipeline """ lowerCAmelCase__ = """ mname = \"hf-internal-testing/tiny-random-bert\" pipe = pipeline(model=mname) """ lowerCAmelCase__ = """ import socket def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\") socket.socket = offline_socket """ lowerCAmelCase__ = self.get_env() lowerCAmelCase__ = """1""" lowerCAmelCase__ = [sys.executable, """-c""", """\n""".join([load, mock, run] )] lowerCAmelCase__ = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( """You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , ) @require_torch def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = """ from transformers import AutoModel """ lowerCAmelCase__ = """ mname = \"hf-internal-testing/test_dynamic_model\" AutoModel.from_pretrained(mname, trust_remote_code=True) print(\"success\") """ # baseline - just load from_pretrained with normal network lowerCAmelCase__ = [sys.executable, """-c""", """\n""".join([load, run] )] # should succeed lowerCAmelCase__ = self.get_env() lowerCAmelCase__ = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase__ = """1""" lowerCAmelCase__ = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() )
674
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() __lowerCAmelCase : Any = logging.get_logger(__name__) __lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 1024, """hidden_size""": 768, """max_length""": 512, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 1024, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1e-5, """token_type_vocab_size""": 2, } lowerCAmelCase__ = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowerCAmelCase__ = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" ) lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ ) lowerCAmelCase__ = nlp.model.BERTModel( lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , ) original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ ) lowerCAmelCase__ = original_bort._collect_params_with_prefix() # Build our config 🤗 lowerCAmelCase__ = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": 
predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.02, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(lowerCamelCase__ ), } lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ ) lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase__ ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ): lowerCAmelCase__ = hf_param.shape lowerCAmelCase__ = to_torch(params[gluon_param] ) lowerCAmelCase__ = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers""" return gluon_param lowerCAmelCase__ = check_and_map_params( 
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowerCAmelCase__ = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i] # self attention lowerCAmelCase__ = layer.attention.self lowerCAmelCase__ = check_and_map_params( self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output lowerCAmelCase__ = layer.attention.output lowerCAmelCase__ = check_and_map_params( self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" ) lowerCAmelCase__ = check_and_map_params( self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate lowerCAmelCase__ = layer.intermediate lowerCAmelCase__ = check_and_map_params( intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) lowerCAmelCase__ = check_and_map_params( intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output lowerCAmelCase__ = layer.output lowerCAmelCase__ = check_and_map_params( bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) lowerCAmelCase__ = check_and_map_params( bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" ) lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""] # Get gluon output lowerCAmelCase__ = mx.nd.array([input_ids] ) lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase__ ) lowerCAmelCase__ = 
BertModel.from_pretrained(lowerCamelCase__ ) hf_bort_model.eval() lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" ) lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0] lowerCAmelCase__ = output_gluon[0].asnumpy() lowerCAmelCase__ = output_hf[0].detach().numpy() lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item() lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) if success: print("""✔️ Both model do output the same tensors""" ) else: print("""❌ Both model do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : str = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
674
1
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = 0 def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = CLIPConfig() # Create a dummy config file with image_proceesor_type lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict() config_dict.pop("""image_processor_type""" ) lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ ) # save in new folder model_config.save_pretrained(snake_case__ ) config.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) # make sure private variable is not incorrectly saved lowerCAmelCase__ = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": 
"""CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): with self.assertRaisesRegex( snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): with self.assertRaisesRegex( snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): with self.assertRaisesRegex( snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(snake_case__ ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case__ ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def _SCREAMING_SNAKE_CASE ( self : Dict ): try: AutoConfig.register("""custom""" , snake_case__ ) AutoImageProcessor.register(snake_case__ , snake_case__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoImageProcessor.register(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _SCREAMING_SNAKE_CASE ( self : List[str] ): class a_ ( __UpperCamelCase ): UpperCamelCase_ : Tuple = True try: AutoConfig.register("""custom""" , snake_case__ ) AutoImageProcessor.register(snake_case__ , snake_case__ ) # If remote code is not set, the default is to use local lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(snake_case__ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
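# --- Illustrative sketch (not part of the original test file) ---
# The tests above exercise the AutoImageProcessor registration flow. Stripped
# of the unittest plumbing it is roughly the following; `MyConfig` and
# `MyImageProcessor` are hypothetical names used only for this sketch.
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyImageProcessor(BaseImageProcessor):
    pass


AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)
# AutoImageProcessor.from_pretrained now resolves checkpoints whose config has
# model_type "my-model" to MyImageProcessor.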
674
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a_ : def __init__( self : Optional[int] ): lowerCAmelCase__ = """""" lowerCAmelCase__ = """""" lowerCAmelCase__ = [] lowerCAmelCase__ = 0 lowerCAmelCase__ = 256 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ): lowerCAmelCase__ = cva.imread(snake_case__ , 0 ) lowerCAmelCase__ = copy.deepcopy(self.img ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" ) lowerCAmelCase__ = np.sum(snake_case__ ) for i in range(len(snake_case__ ) ): lowerCAmelCase__ = x[i] / self.k self.sk += prk lowerCAmelCase__ = (self.L - 1) * self.sk if self.rem != 0: lowerCAmelCase__ = int(last % last ) lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(snake_case__ ) lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size ) lowerCAmelCase__ = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCAmelCase__ = self.img[j][i] if num != self.last_list[num]: lowerCAmelCase__ = self.last_list[num] cva.imwrite("""output_data/output.jpg""" , self.img ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): plt.hist(self.img.ravel() , 256 , [0, 256] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): cva.imshow("""Output-Image""" , self.img ) cva.imshow("""Input-Image""" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": __lowerCAmelCase : Dict = os.path.join(os.path.basename(__file__), "image_data/input.jpg") __lowerCAmelCase : Optional[int] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
674
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : str = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Tuple = [ "SEW_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys __lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
674
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class a_ ( __UpperCamelCase ): UpperCamelCase_ : List[str] = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined" UpperCamelCase_ : Any = "image_segmenter" UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation UpperCamelCase_ : List[str] = ["image", "text"] UpperCamelCase_ : int = ["image"] def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ): requires_backends(self , ["""vision"""] ) super().__init__(*snake_case__ , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ): return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" ) def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ): with torch.no_grad(): lowerCAmelCase__ = self.model(**snake_case__ ).logits return logits def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ): lowerCAmelCase__ = outputs.cpu().detach().numpy() lowerCAmelCase__ = 0 lowerCAmelCase__ = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
674
1
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowerCAmelCase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name __lowerCAmelCase : List[Any] = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... 
).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=8 ): """simple docstring""" lowerCAmelCase__ = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 lowerCAmelCase__ = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class a_ ( __UpperCamelCase ): def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ): super().__init__() self.register_modules( unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , ) lowerCAmelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Any , snake_case__ : str , snake_case__ : Union[str, Any] ): if latents is None: lowerCAmelCase__ = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) lowerCAmelCase__ = latents.to(snake_case__ ) lowerCAmelCase__ = latents * scheduler.init_noise_sigma return latents def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) lowerCAmelCase__ = torch.device(F"""cuda:{gpu_id}""" ) lowerCAmelCase__ = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : List[Any]=0 ): if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) lowerCAmelCase__ = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=snake_case__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) lowerCAmelCase__ = None for cpu_offloaded_model in [self.unet, self.movq]: lowerCAmelCase__ , lowerCAmelCase__ = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ ) # We'll offload the last model manually. 
lowerCAmelCase__ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _SCREAMING_SNAKE_CASE ( self : List[Any] ): if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(snake_case__ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(snake_case__ ) def __call__( self : Optional[int] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : torch.FloatTensor , snake_case__ : int = 512 , snake_case__ : int = 512 , snake_case__ : int = 100 , snake_case__ : float = 4.0 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ): lowerCAmelCase__ = self._execution_device lowerCAmelCase__ = guidance_scale > 1.0 if isinstance(snake_case__ , snake_case__ ): lowerCAmelCase__ = torch.cat(snake_case__ , dim=0 ) if isinstance(snake_case__ , snake_case__ ): lowerCAmelCase__ = torch.cat(snake_case__ , dim=0 ) if isinstance(snake_case__ , snake_case__ ): lowerCAmelCase__ = torch.cat(snake_case__ , dim=0 ) lowerCAmelCase__ = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: lowerCAmelCase__ = image_embeds.repeat_interleave(snake_case__ , dim=0 ) lowerCAmelCase__ = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 ) lowerCAmelCase__ = hint.repeat_interleave(snake_case__ , dim=0 ) lowerCAmelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ ) lowerCAmelCase__ = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ ) self.scheduler.set_timesteps(snake_case__ , device=snake_case__ ) lowerCAmelCase__ = self.scheduler.timesteps lowerCAmelCase__ = self.movq.config.latent_channels lowerCAmelCase__ , lowerCAmelCase__ = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor ) # create initial latent lowerCAmelCase__ = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , snake_case__ , snake_case__ , snake_case__ , self.scheduler , ) for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the latents if we are doing classifier free guidance lowerCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCAmelCase__ = {"""image_embeds""": image_embeds, """hint""": hint} lowerCAmelCase__ = self.unet( sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0] if do_classifier_free_guidance: lowerCAmelCase__ , lowerCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 ) lowerCAmelCase__ , lowerCAmelCase__ = noise_pred.chunk(2 ) lowerCAmelCase__ , lowerCAmelCase__ = variance_pred.chunk(2 ) lowerCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowerCAmelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowerCAmelCase__ , 
lowerCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase__ = self.scheduler.step( snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0] # post-processing lowerCAmelCase__ = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: lowerCAmelCase__ = image * 0.5 + 0.5 lowerCAmelCase__ = image.clamp(0 , 1 ) lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowerCAmelCase__ = self.numpy_to_pil(snake_case__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=snake_case__ )
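# --- Illustrative sketch (not part of the original pipeline file) ---
# The guidance step inside the denoising loop above is standard classifier-free
# guidance: eps = eps_uncond + scale * (eps_text - eps_uncond). Stripped of the
# pipeline plumbing it is just:
import torch


def classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # the batch holds the unconditional and the conditional predictions stacked
    noise_uncond, noise_text = noise_pred.chunk(2)
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)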
674
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = LayoutLMTokenizer UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast UpperCamelCase_ : Dict = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() lowerCAmelCase__ = [ """[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ): lowerCAmelCase__ = """UNwant\u00E9d,running""" lowerCAmelCase__ = """unwanted, running""" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): pass
674
1
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __lowerCAmelCase : Any = "platform" import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ): """simple docstring""" if attention_mask is None: lowerCAmelCase__ = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowerCAmelCase__ = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowerCAmelCase__ = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase__ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase__ = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class a_ : def __init__( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[Any]=13 , snake_case__ : List[str]=7 , snake_case__ : Optional[int]=True , snake_case__ : Optional[int]=False , snake_case__ : Any=99 , snake_case__ : List[str]=16 , snake_case__ : Dict=2 , snake_case__ : Union[str, Any]=4 , snake_case__ : Dict=4 , snake_case__ : Dict="gelu" , snake_case__ : Any=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : Any=32 , snake_case__ : Tuple=2 , snake_case__ : List[str]=1 , snake_case__ : Dict=0 , snake_case__ : Optional[int]=0.02 , ): lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = seq_length lowerCAmelCase__ = is_training lowerCAmelCase__ = use_labels lowerCAmelCase__ = vocab_size lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = eos_token_id lowerCAmelCase__ = pad_token_id lowerCAmelCase__ = bos_token_id lowerCAmelCase__ = initializer_range def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) lowerCAmelCase__ = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) lowerCAmelCase__ = shift_tokens_right(snake_case__ , 1 , 2 ) lowerCAmelCase__ = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , 
encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=snake_case__ , ) lowerCAmelCase__ = prepare_blenderbot_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ , lowerCAmelCase__ = self.prepare_config_and_inputs() return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : List[Any] ): lowerCAmelCase__ = 20 lowerCAmelCase__ = model_class_name(snake_case__ ) lowerCAmelCase__ = model.encode(inputs_dict["""input_ids"""] ) lowerCAmelCase__ , lowerCAmelCase__ = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) lowerCAmelCase__ = model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ ) lowerCAmelCase__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) lowerCAmelCase__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase__ = model.decode( decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , ) lowerCAmelCase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCAmelCase__ = model.decode( decoder_input_ids[:, -1:] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case__ , ) lowerCAmelCase__ = model.decode(snake_case__ , snake_case__ ) lowerCAmelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ): lowerCAmelCase__ = 20 lowerCAmelCase__ = model_class_name(snake_case__ ) lowerCAmelCase__ = model.encode(inputs_dict["""input_ids"""] ) lowerCAmelCase__ , lowerCAmelCase__ = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) lowerCAmelCase__ = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCAmelCase__ = model.init_cache(decoder_input_ids.shape[0] , snake_case__ , snake_case__ ) lowerCAmelCase__ = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase__ = model.decode( decoder_input_ids[:, :-1] , snake_case__ , decoder_attention_mask=snake_case__ , past_key_values=snake_case__ , decoder_position_ids=snake_case__ , ) lowerCAmelCase__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCAmelCase__ = model.decode( decoder_input_ids[:, -1:] , snake_case__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=snake_case__ , decoder_position_ids=snake_case__ , ) lowerCAmelCase__ = 
model.decode(snake_case__ , snake_case__ , decoder_attention_mask=snake_case__ ) lowerCAmelCase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class a_ ( unittest.TestCase ): UpperCamelCase_ : int = 99 def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) lowerCAmelCase__ = input_ids.shape[0] lowerCAmelCase__ = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self._get_config_and_data() lowerCAmelCase__ = FlaxBlenderbotForConditionalGeneration(snake_case__ ) lowerCAmelCase__ = lm_model(input_ids=snake_case__ ) lowerCAmelCase__ = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["""logits"""].shape , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) lowerCAmelCase__ = FlaxBlenderbotForConditionalGeneration(snake_case__ ) lowerCAmelCase__ = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) lowerCAmelCase__ = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) lowerCAmelCase__ = lm_model(input_ids=snake_case__ , decoder_input_ids=snake_case__ ) lowerCAmelCase__ = (*summary.shape, config.vocab_size) self.assertEqual(outputs["""logits"""].shape , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) lowerCAmelCase__ = shift_tokens_right(snake_case__ , 1 , 2 ) lowerCAmelCase__ = np.equal(snake_case__ , 1 ).astype(np.floataa ).sum() lowerCAmelCase__ = np.equal(snake_case__ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(snake_case__ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class a_ ( __UpperCamelCase , unittest.TestCase , __UpperCamelCase ): UpperCamelCase_ : Optional[int] = True UpperCamelCase_ : Union[str, Any] = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) UpperCamelCase_ : Optional[int] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = FlaxBlenderbotModelTester(self ) def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(snake_case__ , snake_case__ , snake_case__ ) 
def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(snake_case__ , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase__ = self._prepare_for_class(snake_case__ , snake_case__ ) lowerCAmelCase__ = model_class(snake_case__ ) @jax.jit def encode_jitted(snake_case__ : List[Any] , snake_case__ : Any=None , **snake_case__ : Any ): return model.encode(input_ids=snake_case__ , attention_mask=snake_case__ ) with self.subTest("""JIT Enabled""" ): lowerCAmelCase__ = encode_jitted(**snake_case__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): lowerCAmelCase__ = encode_jitted(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) for jitted_output, output in zip(snake_case__ , snake_case__ ): self.assertEqual(jitted_output.shape , output.shape ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) lowerCAmelCase__ = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : List[Any] ): return model.decode( decoder_input_ids=snake_case__ , decoder_attention_mask=snake_case__ , encoder_outputs=snake_case__ , ) with self.subTest("""JIT Enabled""" ): lowerCAmelCase__ = decode_jitted(**snake_case__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): lowerCAmelCase__ = decode_jitted(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) for jitted_output, output in zip(snake_case__ , snake_case__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _SCREAMING_SNAKE_CASE ( self : int ): for model_class_name in self.all_model_classes: lowerCAmelCase__ = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCAmelCase__ = np.ones((1, 1) ) * model.config.eos_token_id lowerCAmelCase__ = model(snake_case__ ) self.assertIsNotNone(snake_case__ ) @unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25} lowerCAmelCase__ = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True} lowerCAmelCase__ = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=snake_case__ ) lowerCAmelCase__ = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" ) lowerCAmelCase__ = ["""Sam"""] lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""jax""" ) lowerCAmelCase__ = model.generate(**snake_case__ , **snake_case__ ) lowerCAmelCase__ = """Sam 
is a great name. It means \"sun\" in Gaelic.""" lowerCAmelCase__ = tokenizer.batch_decode(snake_case__ , **snake_case__ ) assert generated_txt[0].strip() == tgt_text
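# --- Illustrative sketch (not part of the original test file) ---
# shift_tokens_right, exercised by the tests above, builds decoder inputs by
# moving the labels one position to the right, putting the decoder start token
# first and mapping the -100 label padding back to pad_token_id. A minimal
# numpy rendition of the same idea:
import numpy as np


def shift_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]      # rotate right by one position
    shifted[:, 0] = decoder_start_token_id  # the decoder always starts with this id
    return np.where(shifted == -100, pad_token_id, shifted)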
674
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 __lowerCAmelCase : Any = { # 1536-bit 5: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 2048-bit 14: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 3072-bit 15: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 4096-bit 16: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 6144-bit 17: { "prime": int( 
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 8192-bit 18: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + 
"3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, } class a_ : def __init__( self : List[str] , snake_case__ : int = 14 ): if group not in primes: raise ValueError("""Unsupported Group""" ) lowerCAmelCase__ = primes[group]["""prime"""] lowerCAmelCase__ = primes[group]["""generator"""] lowerCAmelCase__ = int(hexlify(urandom(32 ) ) , base=16 ) def _SCREAMING_SNAKE_CASE ( self : Any ): return hex(self.__private_key )[2:] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = pow(self.generator , self.__private_key , self.prime ) return hex(snake_case__ )[2:] def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str ): lowerCAmelCase__ = int(snake_case__ , base=16 ) if not self.is_valid_public_key(snake_case__ ): raise ValueError("""Invalid public key""" ) lowerCAmelCase__ = pow(snake_case__ , self.__private_key , self.prime ) return shaaaa(str(snake_case__ ).encode() ).hexdigest() @staticmethod def _SCREAMING_SNAKE_CASE ( snake_case__ : int , snake_case__ : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1 ) @staticmethod def _SCREAMING_SNAKE_CASE ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ): lowerCAmelCase__ = int(snake_case__ , base=16 ) lowerCAmelCase__ = int(snake_case__ , base=16 ) lowerCAmelCase__ = primes[group]["""prime"""] if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ): raise ValueError("""Invalid public key""" ) lowerCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ ) return shaaaa(str(snake_case__ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
674
1
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() __lowerCAmelCase : Any = logging.get_logger(__name__) __lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 1024, """hidden_size""": 768, """max_length""": 512, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 1024, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1e-5, """token_type_vocab_size""": 2, } lowerCAmelCase__ = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowerCAmelCase__ = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" ) lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ ) lowerCAmelCase__ = nlp.model.BERTModel( lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , ) original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ ) lowerCAmelCase__ = original_bort._collect_params_with_prefix() # Build our config 🤗 lowerCAmelCase__ = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": 
predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.02, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(lowerCamelCase__ ), } lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ ) lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase__ ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ): lowerCAmelCase__ = hf_param.shape lowerCAmelCase__ = to_torch(params[gluon_param] ) lowerCAmelCase__ = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers""" return gluon_param lowerCAmelCase__ = check_and_map_params( 
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowerCAmelCase__ = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i] # self attention lowerCAmelCase__ = layer.attention.self lowerCAmelCase__ = check_and_map_params( self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output lowerCAmelCase__ = layer.attention.output lowerCAmelCase__ = check_and_map_params( self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" ) lowerCAmelCase__ = check_and_map_params( self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate lowerCAmelCase__ = layer.intermediate lowerCAmelCase__ = check_and_map_params( intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) lowerCAmelCase__ = check_and_map_params( intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output lowerCAmelCase__ = layer.output lowerCAmelCase__ = check_and_map_params( bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) lowerCAmelCase__ = check_and_map_params( bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" ) lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""] # Get gluon output lowerCAmelCase__ = mx.nd.array([input_ids] ) lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase__ ) lowerCAmelCase__ = 
BertModel.from_pretrained(lowerCamelCase__ )
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" )
    output_hf = hf_bort_model(**input_ids )[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )

    if success:
        print("""✔️ Both models output the same tensors""" )
    else:
        print("""❌ Both models do **NOT** output the same tensors""" )
        print("""Absolute difference is:""" , max_absolute_diff )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path to the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
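# --- Illustrative usage (not part of the original file) ---
# The converter above is meant to be run as a command line script; a typical
# invocation would look like the following (script name and paths are
# placeholders):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch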
674
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ): """simple docstring""" assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match""" lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match""" lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = np.asarray(weights[0] ) lowerCAmelCase__ = np.asarray(weights[1] ) lowerCAmelCase__ = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = np.asarray(weights[0] ) lowerCAmelCase__ = np.asarray(weights[1] ) lowerCAmelCase__ = np.asarray(weights[2] ) lowerCAmelCase__ = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = weights[0][0][0] lowerCAmelCase__ = np.asarray(layer_norm_a[0] ) lowerCAmelCase__ = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # lsh weights + output lowerCAmelCase__ = weights[0][1] if len(lowerCamelCase__ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ ) else: set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ ) # intermediate weighs lowerCAmelCase__ = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase__ ) == 4: lowerCAmelCase__ = intermediate_weights[2] # layernorm 2 lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] ) lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # intermediate dense lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] ) lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) # intermediate out lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] 
) lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = torch_model.reformer # word embeds lowerCAmelCase__ = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , ) if isinstance(weights[3] , lowerCamelCase__ ): lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f"""{position_embeddings[emb_idx]} emb does not match""" lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) ) lowerCAmelCase__ = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # output layer norm lowerCAmelCase__ = np.asarray(weights[7][0] ) lowerCAmelCase__ = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # output embeddings lowerCAmelCase__ = np.asarray(weights[9][0] ) lowerCAmelCase__ = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = ReformerConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ = ReformerModelWithLMHead(lowerCamelCase__ ) with open(lowerCamelCase__ , """rb""" ) as f: lowerCAmelCase__ = pickle.load(lowerCamelCase__ )["""weights"""] set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
674
1
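# Both conversion scripts above reduce to one core move: copy a source-framework
# array into the matching PyTorch parameter only after asserting the shapes agree.
# A minimal, self-contained sketch of that pattern, mirroring the set_param helper
# in the Reformer script; `source_weights` is a hypothetical stand-in for a loaded
# checkpoint, not a real file format.
import numpy as np
import torch
from torch import nn


def set_param(torch_layer: nn.Module, weight: torch.Tensor, bias: torch.Tensor = None) -> None:
    # Shape assertions catch silent transposition/mapping mistakes early.
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


source_weights = {
    "proj/kernel": np.random.randn(16, 8).astype(np.float32),  # stored as (in, out)
    "proj/bias": np.zeros(8, dtype=np.float32),
}
layer = nn.Linear(16, 8)
# PyTorch Linear stores weight as (out, in), hence the transpose on copy.
set_param(
    layer,
    torch.tensor(source_weights["proj/kernel"]).transpose(0, 1).contiguous(),
    torch.tensor(source_weights["proj/bias"]),
)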
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer if TYPE_CHECKING: from ...tokenization_utils_base import TextInput from ...utils import logging __lowerCAmelCase : Optional[int] = logging.get_logger(__name__) __lowerCAmelCase : str = {"vocab_file": "spiece.model"} __lowerCAmelCase : Union[str, Any] = { "vocab_file": { "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model", "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model", "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model", "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model", "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model", } } # TODO(PVP) - this should be removed in Transformers v5 __lowerCAmelCase : str = { "t5-small": 5_12, "t5-base": 5_12, "t5-large": 5_12, "t5-3b": 5_12, "t5-11b": 5_12, } __lowerCAmelCase : int = "▁" class a_ ( __UpperCamelCase ): UpperCamelCase_ : str = VOCAB_FILES_NAMES UpperCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : List[Any] = ["input_ids", "attention_mask"] def __init__( self : str , snake_case__ : List[Any] , snake_case__ : Optional[int]="</s>" , snake_case__ : List[str]="<unk>" , snake_case__ : Union[str, Any]="<pad>" , snake_case__ : Tuple=100 , snake_case__ : Dict=None , snake_case__ : Optional[Dict[str, Any]] = None , snake_case__ : Union[str, Any]=True , **snake_case__ : Tuple , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: lowerCAmelCase__ = [F"""<extra_id_{i}>""" for i in range(snake_case__ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens lowerCAmelCase__ = len(set(filter(lambda snake_case__ : bool("""extra_id""" in str(snake_case__ ) ) , snake_case__ ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) if legacy: logger.warning_once( F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to""" """ read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" ) lowerCAmelCase__ = legacy lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , extra_ids=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case__ , **snake_case__ , ) lowerCAmelCase__ = vocab_file lowerCAmelCase__ = extra_ids lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case__ ) @staticmethod def _SCREAMING_SNAKE_CASE ( snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : str ): if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes: lowerCAmelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this""" """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" F""" {pretrained_model_name_or_path} automatically truncating your input to""" F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences""" F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with""" """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , snake_case__ , ) return max_model_length @property def _SCREAMING_SNAKE_CASE ( self : str ): return self.sp_model.get_piece_size() + self._extra_ids def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(snake_case__ )) + [1] return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1] def _SCREAMING_SNAKE_CASE ( self : Tuple ): return list( set(filter(lambda snake_case__ : bool(re.search(R"""<extra_id_\d+>""" , snake_case__ ) ) is not None , self.additional_special_tokens ) ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): return [self._convert_token_to_id(snake_case__ ) for token in self.get_sentinel_tokens()] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : List[int] ): if len(snake_case__ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): lowerCAmelCase__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): lowerCAmelCase__ = self._add_eos_if_not_present(snake_case__ ) if token_ids_a is None: return token_ids_a else: lowerCAmelCase__ = self._add_eos_if_not_present(snake_case__ ) return token_ids_a + token_ids_a def __getstate__( self : str ): lowerCAmelCase__ = self.__dict__.copy() lowerCAmelCase__ = None return state def __setstate__( self : Dict , snake_case__ : Tuple ): lowerCAmelCase__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase__ = {} lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : "TextInput" , **snake_case__ : Any ): # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at # the beginning of the text if not self.legacy: lowerCAmelCase__ = SPIECE_UNDERLINE + text.replace(snake_case__ , """ """ ) return super().tokenize(snake_case__ , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Dict , **snake_case__ : Optional[Any] ): if not self.legacy: lowerCAmelCase__ = text.startswith(snake_case__ ) if is_first: lowerCAmelCase__ = text[1:] lowerCAmelCase__ = self.sp_model.encode(snake_case__ , out_type=snake_case__ ) if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(snake_case__ ): lowerCAmelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:] return tokens def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : str ): if token.startswith("""<extra_id_""" ): lowerCAmelCase__ = re.match(R"""<extra_id_(\d+)>""" , snake_case__ ) lowerCAmelCase__ = int(match.group(1 ) ) return self.vocab_size - num - 1 return self.sp_model.piece_to_id(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Dict ): if index < self.sp_model.get_piece_size(): lowerCAmelCase__ = self.sp_model.IdToPiece(snake_case__ ) else: lowerCAmelCase__ = F"""<extra_id_{self.vocab_size - 1 - index}>""" return token def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Optional[int] ): lowerCAmelCase__ = [] lowerCAmelCase__ = """""" lowerCAmelCase__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case__ ) + token lowerCAmelCase__ = True lowerCAmelCase__ = [] else: current_sub_tokens.append(snake_case__ ) lowerCAmelCase__ = False out_string += self.sp_model.decode(snake_case__ ) return out_string.strip() def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : str , snake_case__ : Optional[str] = None ): if not os.path.isdir(snake_case__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ = os.path.join( snake_case__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != 
os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case__ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case__ , """wb""" ) as fi: lowerCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (out_vocab_file,)
674
"""simple docstring""" import os from math import logaa def _UpperCAmelCase ( lowerCamelCase__ = "base_exp.txt" ): """simple docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) ): lowerCAmelCase__ , lowerCAmelCase__ = list(map(lowerCamelCase__ , line.split(""",""" ) ) ) if x * logaa(lowerCamelCase__ ) > largest: lowerCAmelCase__ = x * logaa(lowerCamelCase__ ) lowerCAmelCase__ = i + 1 return result if __name__ == "__main__": print(solution())
674
1
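# The base_exp solver above never computes a**b directly: because log10 is
# monotonic, a**b > c**d exactly when b*log10(a) > d*log10(c). A tiny check on
# pairs small enough to verify by direct exponentiation:
from math import log10

pairs = [(2, 100), (3, 64), (5, 40), (7, 30)]
best = max(pairs, key=lambda p: p[1] * log10(p[0]))
print(best)  # (3, 64)

# sanity check: the log comparison agrees with the direct one
assert (2**100 > 3**64) == (100 * log10(2) > 64 * log10(3))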
"""simple docstring""" class a_ : def __init__( self : Optional[Any] , snake_case__ : int ): lowerCAmelCase__ = n lowerCAmelCase__ = [None] * self.n lowerCAmelCase__ = 0 # index of the first element lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 def __len__( self : int ): return self.size def _SCREAMING_SNAKE_CASE ( self : Any ): return self.size == 0 def _SCREAMING_SNAKE_CASE ( self : Dict ): return False if self.is_empty() else self.array[self.front] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : List[str] ): if self.size >= self.n: raise Exception("""QUEUE IS FULL""" ) lowerCAmelCase__ = data lowerCAmelCase__ = (self.rear + 1) % self.n self.size += 1 return self def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): if self.size == 0: raise Exception("""UNDERFLOW""" ) lowerCAmelCase__ = self.array[self.front] lowerCAmelCase__ = None lowerCAmelCase__ = (self.front + 1) % self.n self.size -= 1 return temp
674
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" while b: lowerCAmelCase__ , lowerCAmelCase__ = b, a % b return a def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return a if b == 0 else euclidean_gcd_recursive(lowerCamelCase__ , a % b ) def _UpperCAmelCase ( ): """simple docstring""" print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" ) print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" ) print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" ) print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" ) print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" ) print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" ) print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" ) print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" ) if __name__ == "__main__": main()
674
1
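# A short exercise of the CircularQueue above: the modular front/rear indices
# let the buffer wrap around without ever shifting elements. Assumes the class
# definition above is importable in this scope.
q = CircularQueue(3)
q.enqueue("a").enqueue("b").enqueue("c")  # enqueue returns self, so calls chain
assert len(q) == 3
assert q.dequeue() == "a"      # front advances to index 1
q.enqueue("d")                 # rear wraps around to index 0
assert [q.dequeue() for _ in range(3)] == ["b", "c", "d"]
assert q.is_empty()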
"""simple docstring""" from collections.abc import Callable import numpy as np def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) ) lowerCAmelCase__ = np.zeros((n + 1,) ) lowerCAmelCase__ = ya lowerCAmelCase__ = xa for k in range(lowerCamelCase__ ): lowerCAmelCase__ = y[k] + step_size * ode_func(lowerCamelCase__ , y[k] ) lowerCAmelCase__ = y[k] + ( (step_size / 2) * (ode_func(lowerCamelCase__ , y[k] ) + ode_func(x + step_size , lowerCamelCase__ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
674
"""simple docstring""" import os def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = os.path.dirname(os.path.realpath(lowerCamelCase__ ) ) lowerCAmelCase__ = os.path.join(lowerCamelCase__ , """triangle.txt""" ) with open(lowerCamelCase__ ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = [] for line in triangle: lowerCAmelCase__ = [] for number in line.strip().split(""" """ ): numbers_from_line.append(int(lowerCamelCase__ ) ) a.append(lowerCamelCase__ ) for i in range(1 , len(lowerCamelCase__ ) ): for j in range(len(a[i] ) ): lowerCAmelCase__ = a[i - 1][j] if j != len(a[i - 1] ) else 0 lowerCAmelCase__ = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(lowerCamelCase__ , lowerCamelCase__ ) return max(a[-1] ) if __name__ == "__main__": print(solution())
674
1
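# A quick accuracy check of euler_modified above on y' = y, y(0) = 1, whose
# exact solution is e^x. Since the predictor-corrector is second order, halving
# the step size should cut the endpoint error by roughly a factor of 4.
import numpy as np

ode = lambda x, y: y
err_coarse = abs(euler_modified(ode, 1.0, 0.0, 0.1, 1.0)[-1] - np.e)
err_fine = abs(euler_modified(ode, 1.0, 0.0, 0.05, 1.0)[-1] - np.e)
print(err_coarse / err_fine)  # close to 4 for a second-order method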
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="attention" ): """simple docstring""" lowerCAmelCase__ = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""] lowerCAmelCase__ = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""] lowerCAmelCase__ = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""] lowerCAmelCase__ = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""] return k, o, q, v def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ): """simple docstring""" if split_mlp_wi: lowerCAmelCase__ = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""] lowerCAmelCase__ = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""] lowerCAmelCase__ = (wi_a, wi_a) else: lowerCAmelCase__ = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""] lowerCAmelCase__ = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""] return wi, wo def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""] def _UpperCAmelCase ( lowerCamelCase__ , *, lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = traverse_util.flatten_dict(variables["""target"""] ) lowerCAmelCase__ = {"""/""".join(lowerCamelCase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowerCAmelCase__ = """encoder/layers_0/mlp/wi_0/kernel""" in old print("""Split MLP:""" , lowerCamelCase__ ) lowerCAmelCase__ = collections.OrderedDict() # Shared embeddings. lowerCAmelCase__ = old["""token_embedder/embedding"""] # Encoder. for i in range(lowerCamelCase__ ): # Block i, layer 0 (Self Attention). lowerCAmelCase__ = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , """encoder""" , """pre_attention_layer_norm""" ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , """encoder""" , """attention""" ) lowerCAmelCase__ = layer_norm lowerCAmelCase__ = k.T lowerCAmelCase__ = o.T lowerCAmelCase__ = q.T lowerCAmelCase__ = v.T # Block i, layer 1 (MLP). lowerCAmelCase__ = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , """encoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase__ , lowerCAmelCase__ = tax_mlp_lookup(lowerCamelCase__ , lowerCamelCase__ , """encoder""" , lowerCamelCase__ ) lowerCAmelCase__ = layer_norm if split_mlp_wi: lowerCAmelCase__ = wi[0].T lowerCAmelCase__ = wi[1].T else: lowerCAmelCase__ = wi.T lowerCAmelCase__ = wo.T lowerCAmelCase__ = old[ """encoder/relpos_bias/rel_embedding""" ].T lowerCAmelCase__ = old["""encoder/encoder_norm/scale"""] if not is_encoder_only: # Decoder. for i in range(lowerCamelCase__ ): # Block i, layer 0 (Self Attention). 
lowerCAmelCase__ = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , """decoder""" , """pre_self_attention_layer_norm""" ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , """decoder""" , """self_attention""" ) lowerCAmelCase__ = layer_norm lowerCAmelCase__ = k.T lowerCAmelCase__ = o.T lowerCAmelCase__ = q.T lowerCAmelCase__ = v.T # Block i, layer 1 (Cross Attention). lowerCAmelCase__ = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , """decoder""" , """pre_cross_attention_layer_norm""" ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , """decoder""" , """encoder_decoder_attention""" ) lowerCAmelCase__ = layer_norm lowerCAmelCase__ = k.T lowerCAmelCase__ = o.T lowerCAmelCase__ = q.T lowerCAmelCase__ = v.T # Block i, layer 2 (MLP). lowerCAmelCase__ = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , """decoder""" , """pre_mlp_layer_norm""" ) lowerCAmelCase__ , lowerCAmelCase__ = tax_mlp_lookup(lowerCamelCase__ , lowerCamelCase__ , """decoder""" , lowerCamelCase__ ) lowerCAmelCase__ = layer_norm if split_mlp_wi: lowerCAmelCase__ = wi[0].T lowerCAmelCase__ = wi[1].T else: lowerCAmelCase__ = wi.T lowerCAmelCase__ = wo.T lowerCAmelCase__ = old["""decoder/decoder_norm/scale"""] lowerCAmelCase__ = old[ """decoder/relpos_bias/rel_embedding""" ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowerCAmelCase__ = old["""decoder/logits_dense/kernel"""].T return new def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowerCAmelCase__ = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowerCAmelCase__ = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. print("""Using shared word embeddings as lm_head.""" ) lowerCAmelCase__ = state_dict["""shared.weight"""] return state_dict def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = checkpoints.load_tax_checkpoint(lowerCamelCase__ ) lowerCAmelCase__ = convert_tax_to_pytorch(lowerCamelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCamelCase__ ) lowerCAmelCase__ = make_state_dict(lowerCamelCase__ , lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ): """simple docstring""" lowerCAmelCase__ = TaConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: lowerCAmelCase__ = TaEncoderModel(lowerCamelCase__ ) else: lowerCAmelCase__ = TaForConditionalGeneration(lowerCamelCase__ ) # Load weights from tf checkpoint load_tax_weights_in_ta(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowerCamelCase__ ) # Verify that we can load the checkpoint. model.from_pretrained(lowerCamelCase__ ) print("""Done""" ) if __name__ == "__main__": __lowerCAmelCase : List[Any] = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.") # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False ) __lowerCAmelCase : Any = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
674
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __lowerCAmelCase : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: __lowerCAmelCase : Optional[int] = json.load(f) @require_torch class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict ): return FSMTTokenizer.from_pretrained(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ): lowerCAmelCase__ = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Any , snake_case__ : int ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowerCAmelCase__ = F"""facebook/wmt19-{pair}""" lowerCAmelCase__ = self.get_tokenizer(snake_case__ ) lowerCAmelCase__ = self.get_model(snake_case__ ) lowerCAmelCase__ = bleu_data[pair]["""src"""] lowerCAmelCase__ = bleu_data[pair]["""tgt"""] lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""pt""" , truncation=snake_case__ , padding="""longest""" ).to(snake_case__ ) lowerCAmelCase__ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowerCAmelCase__ = tokenizer.batch_decode( snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) lowerCAmelCase__ = calculate_bleu(snake_case__ , snake_case__ ) print(snake_case__ ) self.assertGreaterEqual(scores["""bleu"""] , snake_case__ )
674
1
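# Why the T5X converter above transposes every kernel (`k.T`): Flax Dense
# kernels are stored as (in_features, out_features) and applied as x @ W, while
# torch.nn.Linear stores (out_features, in_features) and applies x @ W.T. A
# small numpy/torch sketch of the equivalence:
import numpy as np
import torch

rng = np.random.default_rng(0)
kernel = rng.normal(size=(4, 3)).astype(np.float32)   # Flax-style (in, out)
x = rng.normal(size=(2, 4)).astype(np.float32)

flax_out = x @ kernel                                  # Flax Dense, no bias
linear = torch.nn.Linear(4, 3, bias=False)
linear.weight.data = torch.from_numpy(kernel.T.copy())  # torch wants (out, in)
torch_out = linear(torch.from_numpy(x)).detach().numpy()

assert np.allclose(flax_out, torch_out, atol=1e-6)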
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Tuple = PegasusTokenizer UpperCamelCase_ : Any = PegasusTokenizerFast UpperCamelCase_ : int = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return PegasusTokenizer.from_pretrained("""google/pegasus-large""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = """</s>""" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """</s>""" ) self.assertEqual(vocab_keys[-1] , """v""" ) self.assertEqual(len(snake_case__ ) , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ = 
"""To ensure a smooth flow of bank resolutions.""" lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : str ): # fmt: off lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = PegasusTokenizer UpperCamelCase_ : Optional[int] = PegasusTokenizerFast UpperCamelCase_ : Union[str, Any] = True UpperCamelCase_ : Optional[int] = True def _SCREAMING_SNAKE_CASE ( 
self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ): return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids self.assertListEqual( snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
674
"""simple docstring""" import pprint import requests __lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api" def _UpperCAmelCase ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/today""" ).json() def _UpperCAmelCase ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = random_quotes() pprint.pprint(response)
674
1
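# A slightly hardened variant of the Zen Quotes helpers above, as a sketch:
# real HTTP calls deserve a timeout and a status check before .json() is
# trusted. `get_quotes` is an illustrative name, not part of the original file.
import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def get_quotes(path: str = "/random", timeout: float = 10.0) -> list:
    response = requests.get(API_ENDPOINT_URL + path, timeout=timeout)
    response.raise_for_status()  # surface 4xx/5xx instead of parsing an error page
    return response.json()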
"""simple docstring""" import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = { "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json", } class a_ ( __UpperCamelCase ): UpperCamelCase_ : Optional[int] = "align_text_model" def __init__( self : Optional[Any] , snake_case__ : Optional[Any]=30522 , snake_case__ : Tuple=768 , snake_case__ : Union[str, Any]=12 , snake_case__ : Tuple=12 , snake_case__ : Dict=3072 , snake_case__ : int="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Any=0.1 , snake_case__ : List[Any]=512 , snake_case__ : str=2 , snake_case__ : List[str]=0.02 , snake_case__ : Tuple=1E-12 , snake_case__ : Dict=0 , snake_case__ : str="absolute" , snake_case__ : str=True , **snake_case__ : Dict , ): super().__init__(**snake_case__ ) lowerCAmelCase__ = vocab_size lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = hidden_act lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = type_vocab_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = position_embedding_type lowerCAmelCase__ = use_cache lowerCAmelCase__ = pad_token_id @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Tuple ): cls._set_token_in_kwargs(snake_case__ ) lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(snake_case__ , **snake_case__ ) # get the text config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": lowerCAmelCase__ = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class a_ ( __UpperCamelCase ): UpperCamelCase_ : Any = "align_vision_model" def __init__( self : Tuple , snake_case__ : int = 3 , snake_case__ : int = 600 , snake_case__ : float = 2.0 , snake_case__ : float = 3.1 , snake_case__ : int = 8 , snake_case__ : List[int] = [3, 3, 5, 3, 5, 5, 3] , snake_case__ : List[int] = [32, 16, 24, 40, 80, 112, 192] , snake_case__ : List[int] = [16, 24, 40, 80, 112, 192, 320] , snake_case__ : List[int] = [] , snake_case__ : List[int] = [1, 2, 2, 2, 1, 2, 1] , snake_case__ : List[int] = [1, 2, 2, 3, 3, 4, 1] , snake_case__ : List[int] = [1, 6, 6, 6, 6, 6, 6] , snake_case__ : float = 0.25 , snake_case__ : str = "swish" , snake_case__ : int = 2560 , snake_case__ : str = "mean" , snake_case__ : float = 0.02 , snake_case__ : float = 0.001 , snake_case__ : float = 0.99 , snake_case__ : float = 0.2 , **snake_case__ : Union[str, Any] , ): super().__init__(**snake_case__ ) lowerCAmelCase__ = num_channels lowerCAmelCase__ = image_size lowerCAmelCase__ = width_coefficient lowerCAmelCase__ = depth_coefficient lowerCAmelCase__ = depth_divisor lowerCAmelCase__ = kernel_sizes lowerCAmelCase__ = in_channels lowerCAmelCase__ = out_channels lowerCAmelCase__ = depthwise_padding lowerCAmelCase__ = strides lowerCAmelCase__ = num_block_repeats lowerCAmelCase__ = expand_ratios lowerCAmelCase__ = squeeze_expansion_ratio lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dim lowerCAmelCase__ = pooling_type lowerCAmelCase__ = initializer_range lowerCAmelCase__ = batch_norm_eps lowerCAmelCase__ = batch_norm_momentum lowerCAmelCase__ = drop_connect_rate lowerCAmelCase__ = sum(snake_case__ ) * 4 @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Optional[Any] ): cls._set_token_in_kwargs(snake_case__ ) lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": lowerCAmelCase__ = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class a_ ( __UpperCamelCase ): UpperCamelCase_ : int = "align" UpperCamelCase_ : Any = True def __init__( self : Union[str, Any] , snake_case__ : Dict=None , snake_case__ : List[Any]=None , snake_case__ : Union[str, Any]=640 , snake_case__ : Optional[Any]=1.0 , snake_case__ : Any=0.02 , **snake_case__ : Any , ): super().__init__(**snake_case__ ) if text_config is None: lowerCAmelCase__ = {} logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" ) if vision_config is None: lowerCAmelCase__ = {} logger.info("""vision_config is None. 
Initializing the AlignVisionConfig with default values.""" ) lowerCAmelCase__ = AlignTextConfig(**snake_case__ ) lowerCAmelCase__ = AlignVisionConfig(**snake_case__ ) lowerCAmelCase__ = projection_dim lowerCAmelCase__ = temperature_init_value lowerCAmelCase__ = initializer_range @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , snake_case__ : AlignTextConfig , snake_case__ : AlignVisionConfig , **snake_case__ : List[str] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = copy.deepcopy(self.__dict__ ) lowerCAmelCase__ = self.text_config.to_dict() lowerCAmelCase__ = self.vision_config.to_dict() lowerCAmelCase__ = self.__class__.model_type return output
674
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = 0 def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = CLIPConfig() # Create a dummy config file with image_proceesor_type lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict() config_dict.pop("""image_processor_type""" ) lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ ) # save in new folder model_config.save_pretrained(snake_case__ ) config.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) # make sure private variable is not incorrectly saved lowerCAmelCase__ = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": 
"""CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): with self.assertRaisesRegex( snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): with self.assertRaisesRegex( snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): with self.assertRaisesRegex( snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(snake_case__ ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case__ ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def _SCREAMING_SNAKE_CASE ( self : Dict ): try: AutoConfig.register("""custom""" , snake_case__ ) AutoImageProcessor.register(snake_case__ , snake_case__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoImageProcessor.register(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _SCREAMING_SNAKE_CASE ( self : List[str] ): class a_ ( __UpperCamelCase ): UpperCamelCase_ : Tuple = True try: AutoConfig.register("""custom""" , snake_case__ ) AutoImageProcessor.register(snake_case__ , snake_case__ ) # If remote code is not set, the default is to use local lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(snake_case__ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
674
1
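# The AlignConfig pattern above in miniature: a parent config that accepts
# plain dicts for its sub-configs and materializes them into typed objects,
# falling back to defaults when a section is omitted. Class names here are
# illustrative only, not part of the transformers API.
from dataclasses import dataclass


@dataclass
class TextConfig:
    vocab_size: int = 30522
    hidden_size: int = 768


@dataclass
class VisionConfig:
    num_channels: int = 3
    image_size: int = 600


class ComposedConfig:
    def __init__(self, text_config: dict = None, vision_config: dict = None, projection_dim: int = 640):
        # None sections fall back to all-default sub-configs, like AlignConfig does
        self.text_config = TextConfig(**(text_config or {}))
        self.vision_config = VisionConfig(**(vision_config or {}))
        self.projection_dim = projection_dim


cfg = ComposedConfig(text_config={"hidden_size": 512})
assert cfg.text_config.hidden_size == 512
assert cfg.vision_config.image_size == 600  # defaulted section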
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : str = { "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json", } class a_ ( __UpperCamelCase ): UpperCamelCase_ : Union[str, Any] = "gpt_neox_japanese" def __init__( self : List[str] , snake_case__ : Tuple=32000 , snake_case__ : int=2560 , snake_case__ : str=32 , snake_case__ : Union[str, Any]=32 , snake_case__ : Any=4 , snake_case__ : Optional[int]="gelu" , snake_case__ : str=1.00 , snake_case__ : Dict=10000 , snake_case__ : Optional[int]=2048 , snake_case__ : Any=0.02 , snake_case__ : List[str]=1E-5 , snake_case__ : str=True , snake_case__ : int=31996 , snake_case__ : Any=31999 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : Optional[int]=0.0 , **snake_case__ : Optional[Any] , ): super().__init__(bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) lowerCAmelCase__ = vocab_size lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_multiple_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = rotary_pct lowerCAmelCase__ = rotary_emb_base lowerCAmelCase__ = initializer_range lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = use_cache lowerCAmelCase__ = attention_dropout lowerCAmelCase__ = hidden_dropout
674
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a_ : def __init__( self : Optional[int] , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=64 , snake_case__ : Any=None ): lowerCAmelCase__ = np.random.default_rng(snake_case__ ) lowerCAmelCase__ = length lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Optional[Any] ): return self.length def __getitem__( self : List[str] , snake_case__ : Optional[int] ): return {"x": self.x[i], "y": self.y[i]} class a_ ( torch.nn.Module ): def __init__( self : List[str] , snake_case__ : str=0 , snake_case__ : Dict=0 , snake_case__ : Any=False ): super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = True def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Any=None ): if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a[0] + self.b[0] class a_ ( torch.nn.Module ): def __init__( self : Any , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=0 , snake_case__ : List[Any]=False ): super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) lowerCAmelCase__ = True def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any]=None ): if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a + self.b def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 16 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase__ = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""} lowerCAmelCase__ = load_dataset("""csv""" , data_files=lowerCamelCase__ ) lowerCAmelCase__ = datasets["""train"""].unique("""label""" ) lowerCAmelCase__ = {v: i for i, v in enumerate(lowerCamelCase__ )} def tokenize_function(lowerCamelCase__ ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="""max_length""" ) if "label" in examples: lowerCAmelCase__ = [label_to_id[l] for l in examples["""label"""]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(lowerCamelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowerCAmelCase__ = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=2 ) lowerCAmelCase__ = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
674
1
"""simple docstring""" from math import pi, sqrt def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" if num <= 0: raise ValueError("""math domain error""" ) if num > 1_71.5: raise OverflowError("""math range error""" ) elif num - int(lowerCamelCase__ ) not in (0, 0.5): raise NotImplementedError("""num must be an integer or a half-integer""" ) elif num == 0.5: return sqrt(lowerCamelCase__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _UpperCAmelCase ( ): """simple docstring""" assert gamma(0.5 ) == sqrt(lowerCamelCase__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() __lowerCAmelCase : Dict = 1.0 while num: __lowerCAmelCase : Any = float(input("Gamma of: ")) print(F"gamma({num}) = {gamma(num)}") print("\nEnter 0 to exit...")
674
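Note that the blind renaming in this row broke several references (`num`, the recursive `gamma` call, and the original `return sqrt(pi)` branch). In the de-obfuscated original, the recursion satisfies Γ(n) = (n−1)·Γ(n−1) with Γ(0.5) = √π, so a quick worked check against that hypothetical `gamma` would be:

from math import isclose, pi, sqrt

# gamma(2.5) unwinds as 1.5 * 0.5 * gamma(0.5) = 0.75 * sqrt(pi) ≈ 1.3293
assert isclose(gamma(2.5), 0.75 * sqrt(pi))
# Integer inputs reduce to the factorial: gamma(5) == 4! == 24
assert gamma(5) == 24.0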
"""simple docstring""" import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = MobileBertConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ = MobileBertForPreTraining(lowerCamelCase__ ) # Load weights from tf checkpoint lowerCAmelCase__ = load_tf_weights_in_mobilebert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : Optional[int] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
674
1
"""simple docstring""" import re def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return [char.split() for char in re.split(r"""[^ a-z A-Z 0-9 \s]""" , str_ )] def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = split_input(str_ ) return "".join( ["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" try: lowerCAmelCase__ = split_input(lowerCamelCase__ ) if upper: lowerCAmelCase__ = """""".join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: lowerCAmelCase__ = """""".join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return to_simple_case(lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" try: lowerCAmelCase__ = to_simple_case(lowerCamelCase__ ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return to_complex_case(lowerCamelCase__ , lowerCamelCase__ , """_""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return to_complex_case(lowerCamelCase__ , lowerCamelCase__ , """-""" ) if __name__ == "__main__": __import__("doctest").testmod()
674
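Under the names these helpers were obfuscated from (inferred as `split_input`, `to_simple_case`, `to_camel_case`, `to_snake_case`, `to_kebab_case`), their behaviour is roughly as follows; a hedged sketch:

# Expected behaviour of the de-obfuscated helpers (names are inferred, not guaranteed):
to_simple_case("hello world")        # "HelloWorld"  (Pascal case)
to_camel_case("hello world")         # "helloWorld"
to_snake_case("hello world", False)  # "hello_world" (upper=False lowercases each word)
to_kebab_case("HELLO WORLD", False)  # "hello-world"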
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: lowerCAmelCase__ = f"""The input value of [n={number}] has to be > 0""" raise ValueError(lowerCamelCase__ ) else: lowerCAmelCase__ = sylvester(number - 1 ) lowerCAmelCase__ = num - 1 lowerCAmelCase__ = num return lower * upper + 1 if __name__ == "__main__": print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
674
1
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = """ylacombe/bark-small""" lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = """en_speaker_1""" lowerCAmelCase__ = """This is a test string""" lowerCAmelCase__ = """speaker_embeddings_path.json""" lowerCAmelCase__ = """speaker_embeddings""" def _SCREAMING_SNAKE_CASE ( self : List[str] , **snake_case__ : Any ): return AutoTokenizer.from_pretrained(self.checkpoint , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Any ): shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = BarkProcessor(tokenizer=snake_case__ ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowerCAmelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCAmelCase__ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowerCAmelCase__ = 35 lowerCAmelCase__ = 2 lowerCAmelCase__ = 8 lowerCAmelCase__ = { """semantic_prompt""": np.ones(snake_case__ ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowerCAmelCase__ = processor(text=self.input_string , voice_preset=snake_case__ ) lowerCAmelCase__ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case__ , np.array([] ) ).tolist() ) # test loading voice preset from npz file lowerCAmelCase__ = os.path.join(self.tmpdirname , """file.npz""" ) np.savez(snake_case__ , **snake_case__ ) lowerCAmelCase__ = processor(text=self.input_string , voice_preset=snake_case__ ) lowerCAmelCase__ = inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case__ , np.array([] ) ).tolist() ) # test loading voice preset from the hub lowerCAmelCase__ = processor(text=self.input_string , voice_preset=self.voice_preset ) def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = BarkProcessor(tokenizer=snake_case__ ) lowerCAmelCase__ = processor(text=self.input_string ) lowerCAmelCase__ = tokenizer( self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=snake_case__ , 
return_attention_mask=snake_case__ , return_token_type_ids=snake_case__ , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
674
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Tuple = PegasusTokenizer UpperCamelCase_ : Any = PegasusTokenizerFast UpperCamelCase_ : int = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return PegasusTokenizer.from_pretrained("""google/pegasus-large""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = """</s>""" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """</s>""" ) self.assertEqual(vocab_keys[-1] , """v""" ) self.assertEqual(len(snake_case__ ) , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ = 
"""To ensure a smooth flow of bank resolutions.""" lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : str ): # fmt: off lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = PegasusTokenizer UpperCamelCase_ : Optional[int] = PegasusTokenizerFast UpperCamelCase_ : Union[str, Any] = True UpperCamelCase_ : Optional[int] = True def _SCREAMING_SNAKE_CASE ( 
self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ): return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids self.assertListEqual( snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
674
1
"""simple docstring""" import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class a_ ( unittest.TestCase ): @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = ort.SessionOptions() lowerCAmelCase__ = False return options def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) lowerCAmelCase__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) lowerCAmelCase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" ) # using the PNDM scheduler by default lowerCAmelCase__ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase__ = """A red cat sitting on a park bench""" lowerCAmelCase__ = np.random.RandomState(0 ) lowerCAmelCase__ = pipe( prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=snake_case__ , output_type="""np""" , ) lowerCAmelCase__ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-2
674
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : int ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , ) assert hasattr(self , """env""" ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ): # configuration for running training on smdistributed Model Parallel lowerCAmelCase__ = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase__ = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ): TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ): # create estimator lowerCAmelCase__ = self.create_estimator(snake_case__ ) # run training estimator.fit() # result dataframe lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] 
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
674
1
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device __lowerCAmelCase : Any = False class a_ ( unittest.TestCase ): pass @nightly @require_torch_gpu class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt="""first prompt""" , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(snake_case__ ) lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(snake_case__ , torch_dtype=torch.floataa ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase__ = generator.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt="""first prompt""" , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) lowerCAmelCase__ = """cyberpunk 2077""" lowerCAmelCase__ = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.dual_guided( prompt=snake_case__ , image=snake_case__ , text_to_image_strength=0.75 , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images lowerCAmelCase__ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowerCAmelCase__ = """A painting of a squirrel eating a burger """ lowerCAmelCase__ = torch.manual_seed(0 ) lowerCAmelCase__ = pipe.text_to_image( prompt=snake_case__ , generator=snake_case__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images lowerCAmelCase__ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowerCAmelCase__ = pipe.image_variation(snake_case__ , generator=snake_case__ , output_type="""numpy""" ).images lowerCAmelCase__ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCAmelCase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 
0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
674
"""simple docstring""" from math import pi, sqrt def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" if num <= 0: raise ValueError("""math domain error""" ) if num > 1_71.5: raise OverflowError("""math range error""" ) elif num - int(lowerCamelCase__ ) not in (0, 0.5): raise NotImplementedError("""num must be an integer or a half-integer""" ) elif num == 0.5: return sqrt(lowerCamelCase__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _UpperCAmelCase ( ): """simple docstring""" assert gamma(0.5 ) == sqrt(lowerCamelCase__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() __lowerCAmelCase : Dict = 1.0 while num: __lowerCAmelCase : Any = float(input("Gamma of: ")) print(F"gamma({num}) = {gamma(num)}") print("\nEnter 0 to exit...")
674
1
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class a_ ( __UpperCamelCase ): UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : Optional[torch.FloatTensor] = None def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=0.9_99 , lowerCamelCase__="cosine" , ): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCamelCase__ ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCamelCase__ ): return math.exp(t * -12.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) lowerCAmelCase__ = [] for i in range(lowerCamelCase__ ): lowerCAmelCase__ = i / num_diffusion_timesteps lowerCAmelCase__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ) , lowerCamelCase__ ) ) return torch.tensor(lowerCamelCase__ , dtype=torch.floataa ) class a_ ( __UpperCamelCase , __UpperCamelCase ): @register_to_config def __init__( self : List[Any] , snake_case__ : int = 1000 , snake_case__ : str = "fixed_small_log" , snake_case__ : bool = True , snake_case__ : Optional[float] = 1.0 , snake_case__ : str = "epsilon" , snake_case__ : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" ) lowerCAmelCase__ = betas_for_alpha_bar(snake_case__ ) lowerCAmelCase__ = 1.0 - self.betas lowerCAmelCase__ = torch.cumprod(self.alphas , dim=0 ) lowerCAmelCase__ = torch.tensor(1.0 ) # standard deviation of the initial noise distribution lowerCAmelCase__ = 1.0 # setable values lowerCAmelCase__ = None lowerCAmelCase__ = torch.from_numpy(np.arange(0 , snake_case__ )[::-1].copy() ) lowerCAmelCase__ = variance_type def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : torch.FloatTensor , snake_case__ : Optional[int] = None ): return sample def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : int , snake_case__ : Union[str, torch.device] = None ): lowerCAmelCase__ = num_inference_steps lowerCAmelCase__ = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) lowerCAmelCase__ = (np.arange(0 , snake_case__ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) lowerCAmelCase__ = torch.from_numpy(snake_case__ ).to(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Optional[int] , snake_case__ : List[str]=None , snake_case__ : Optional[int]=None , snake_case__ : Tuple=None ): if prev_timestep is None: lowerCAmelCase__ = t - 1 lowerCAmelCase__ = self.alphas_cumprod[t] lowerCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase__ = 1 - alpha_prod_t lowerCAmelCase__ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase__ = self.betas[t] else: lowerCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample lowerCAmelCase__ = beta_prod_t_prev / beta_prod_t * beta if variance_type is 
None: lowerCAmelCase__ = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": lowerCAmelCase__ = torch.log(torch.clamp(snake_case__ , min=1E-20 ) ) lowerCAmelCase__ = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler lowerCAmelCase__ = variance.log() lowerCAmelCase__ = beta.log() lowerCAmelCase__ = (predicted_variance + 1) / 2 lowerCAmelCase__ = frac * max_log + (1 - frac) * min_log return variance def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Optional[int] = None , snake_case__ : Optional[int]=None , snake_case__ : bool = True , ): lowerCAmelCase__ = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": lowerCAmelCase__ , lowerCAmelCase__ = torch.split(snake_case__ , sample.shape[1] , dim=1 ) else: lowerCAmelCase__ = None # 1. compute alphas, betas if prev_timestep is None: lowerCAmelCase__ = t - 1 lowerCAmelCase__ = self.alphas_cumprod[t] lowerCAmelCase__ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase__ = 1 - alpha_prod_t lowerCAmelCase__ = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase__ = self.betas[t] lowerCAmelCase__ = self.alphas[t] else: lowerCAmelCase__ = 1 - alpha_prod_t / alpha_prod_t_prev lowerCAmelCase__ = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowerCAmelCase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowerCAmelCase__ = model_output else: raise ValueError( F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" """ for the UnCLIPScheduler.""" ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowerCAmelCase__ = torch.clamp( snake_case__ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase__ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t lowerCAmelCase__ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowerCAmelCase__ = 0 if t > 0: lowerCAmelCase__ = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=snake_case__ , device=model_output.device ) lowerCAmelCase__ = self._get_variance( snake_case__ , predicted_variance=snake_case__ , prev_timestep=snake_case__ , ) if self.variance_type == "fixed_small_log": lowerCAmelCase__ = variance elif self.variance_type == "learned_range": lowerCAmelCase__ = (0.5 * variance).exp() else: raise ValueError( F"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" """ for the UnCLIPScheduler.""" ) lowerCAmelCase__ = variance * variance_noise lowerCAmelCase__ = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , snake_case__ : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples lowerCAmelCase__ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) lowerCAmelCase__ = timesteps.to(original_samples.device ) lowerCAmelCase__ = alphas_cumprod[timesteps] ** 0.5 lowerCAmelCase__ = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase__ = sqrt_alpha_prod.unsqueeze(-1 ) lowerCAmelCase__ = (1 - alphas_cumprod[timesteps]) ** 0.5 lowerCAmelCase__ = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase__ = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) lowerCAmelCase__ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
674
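For reference, the coefficients computed in the step method above are the standard DDPM posterior, formulas (6)–(7) of Ho et al. (2020), written here in the paper's notation:

\tilde{\mu}_t(x_t, x_0) = \frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1 - \bar{\alpha}_t}\, x_0 + \frac{\sqrt{\alpha_t}\,(1 - \bar{\alpha}_{t-1})}{1 - \bar{\alpha}_t}\, x_t,
\qquad
\tilde{\beta}_t = \frac{1 - \bar{\alpha}_{t-1}}{1 - \bar{\alpha}_t}\,\beta_t

Here `pred_original_sample_coeff` and `current_sample_coeff` are the two fractions in the expression for the posterior mean, `_get_variance` returns the posterior variance (log-interpolated against \beta_t when the variance type is "learned_range"), and the "epsilon" branch recovers x_0 = (x_t - \sqrt{1 - \bar{\alpha}_t}\,\epsilon) / \sqrt{\bar{\alpha}_t}.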
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a_ : def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ): lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = patch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = is_training lowerCAmelCase__ = use_labels lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = scope lowerCAmelCase__ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCAmelCase__ = (image_size // patch_size) ** 2 lowerCAmelCase__ = num_patches + 2 def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[Any] ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ): lowerCAmelCase__ = TFDeiTModel(config=snake_case__ ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple 
, snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ): lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ ) lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ): lowerCAmelCase__ = self.type_sequence_label_size lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ ) lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ ) lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Optional[Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) UpperCamelCase_ : Any = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Optional[int] = False UpperCamelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = TFDeiTModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): pass def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCAmelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] 
lowerCAmelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ): lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _SCREAMING_SNAKE_CASE ( self : Any ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Any ): return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" ) # forward pass lowerCAmelCase__ = model(**snake_case__ ) # verify the logits lowerCAmelCase__ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
674
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = tempfile.mkdtemp() # fmt: off lowerCAmelCase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""] # fmt: on lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) lowerCAmelCase__ = { """do_resize""": True, """size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.5, 0.5, 0.5], """image_std""": [0.5, 0.5, 0.5], } lowerCAmelCase__ = os.path.join(self.tmpdirname , snake_case__ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict , **snake_case__ : str ): return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , **snake_case__ : Dict ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCAmelCase__ = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) lowerCAmelCase__ = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 ) lowerCAmelCase__ = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) 
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = image_processor(snake_case__ , return_tensors="""np""" ) lowerCAmelCase__ = processor(images=snake_case__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) lowerCAmelCase__ = """lower newer""" lowerCAmelCase__ = processor(text=snake_case__ ) lowerCAmelCase__ = tokenizer(snake_case__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) lowerCAmelCase__ = """lower newer""" lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with self.assertRaises(snake_case__ ): processor() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase__ = processor.batch_decode(snake_case__ ) lowerCAmelCase__ = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = self.get_image_processor() lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = VisionTextDualEncoderProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) lowerCAmelCase__ = """lower newer""" lowerCAmelCase__ = self.prepare_image_inputs() lowerCAmelCase__ = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
674
"""simple docstring""" from __future__ import annotations from math import gcd def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ): """simple docstring""" if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: return (pow(lowerCamelCase__ , 2 ) + step) % modulus for _ in range(lowerCamelCase__ ): # These track the position within the cycle detection logic. lowerCAmelCase__ = seed lowerCAmelCase__ = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowerCAmelCase__ = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) __lowerCAmelCase : List[str] = parser.parse_args() __lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"{args.num} is probably prime") else: __lowerCAmelCase : List[str] = args.num // divisor print(F"{args.num} = {divisor} * {quotient}")
674
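A small usage sketch for the Pollard's rho routine above (de-obfuscated name `pollard_rho`; the `seed` and `step` keyword names are inferred, while `attempts` appears in the CLI block). The classic worked example 8051 = 83 × 97 may yield either factor, or None if every attempt fails:

# Floyd cycle detection finds a nontrivial factor with high probability.
divisor = pollard_rho(8051, seed=2, step=1, attempts=5)
assert divisor in (83, 97) or divisor is None
if divisor is not None:
    print(f"8051 = {divisor} * {8051 // divisor}")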
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowerCAmelCase : List[str] = logging.get_logger(__name__) __lowerCAmelCase : Tuple = { "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json", } class a_ ( __UpperCamelCase , __UpperCamelCase ): UpperCamelCase_ : Optional[int] = "convnextv2" def __init__( self : str , snake_case__ : Any=3 , snake_case__ : List[Any]=4 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=None , snake_case__ : Tuple=None , snake_case__ : int="gelu" , snake_case__ : Any=0.02 , snake_case__ : Optional[Any]=1E-12 , snake_case__ : Dict=0.0 , snake_case__ : List[str]=224 , snake_case__ : List[Any]=None , snake_case__ : Any=None , **snake_case__ : Union[str, Any] , ): super().__init__(**snake_case__ ) lowerCAmelCase__ = num_channels lowerCAmelCase__ = patch_size lowerCAmelCase__ = num_stages lowerCAmelCase__ = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes lowerCAmelCase__ = [3, 3, 9, 3] if depths is None else depths lowerCAmelCase__ = hidden_act lowerCAmelCase__ = initializer_range lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = drop_path_rate lowerCAmelCase__ = image_size lowerCAmelCase__ = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] lowerCAmelCase__ , lowerCAmelCase__ = get_aligned_output_features_output_indices( out_features=snake_case__ , out_indices=snake_case__ , stage_names=self.stage_names )
674
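# The config row above derives matching ``out_features``/``out_indices`` from
# ``stage_names`` for backbone use.  A standalone sketch of that alignment
# logic under the usual convention (index i <-> stage_names[i]); the helper
# name is illustrative rather than the library-internal one, and the
# default-to-deepest-stage behaviour is an assumption.
def align_output_features_and_indices(stage_names, out_features=None, out_indices=None):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]  # default to the deepest stage only
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices


stages = ["stem"] + [f"stage{idx}" for idx in range(1, 5)]
print(align_output_features_and_indices(stages, out_features=["stage2", "stage4"]))
# (['stage2', 'stage4'], [2, 4])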
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = TapasConfig.from_json_file(lowerCamelCase__ ) # set absolute/relative position embeddings parameter lowerCAmelCase__ = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "WTQ": # run_task_main.py hparams lowerCAmelCase__ = 4 lowerCAmelCase__ = True # hparam_utils.py hparams lowerCAmelCase__ = 0.66_46_94 lowerCAmelCase__ = 0.20_79_51 lowerCAmelCase__ = 0.12_11_94 lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = 0.0_35_25_13 lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams lowerCAmelCase__ = 4 lowerCAmelCase__ = False # hparam_utils.py hparams lowerCAmelCase__ = 36.45_19 lowerCAmelCase__ = 0.90_34_21 lowerCAmelCase__ = 2_22.0_88 lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = 0.76_31_41 lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "TABFACT": lowerCAmelCase__ = TapasForSequenceClassification(config=lowerCamelCase__ ) elif task == "MLM": lowerCAmelCase__ = TapasForMaskedLM(config=lowerCamelCase__ ) elif task == "INTERMEDIATE_PRETRAINING": lowerCAmelCase__ = TapasModel(config=lowerCamelCase__ ) else: raise ValueError(f"""Task {task} not supported.""" ) print(f"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model (weights and configuration) print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowerCamelCase__ ) # Save tokenizer files print(f"""Save tokenizer files to {pytorch_dump_path}""" ) lowerCAmelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(lowerCamelCase__ ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA." ) parser.add_argument( "--reset_position_index_per_cell", default=False, action="store_true", help="Whether to use relative position embeddings or not. Defaults to True.", ) parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--tapas_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained TAPAS model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
) __lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
674
1
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class a_ : UpperCamelCase_ : Optional[Union[str, Path]] = None UpperCamelCase_ : bool = False UpperCamelCase_ : bool = False UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[Dict] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : bool = False UpperCamelCase_ : bool = False UpperCamelCase_ : bool = False UpperCamelCase_ : bool = True UpperCamelCase_ : Optional[int] = None UpperCamelCase_ : int = 1 UpperCamelCase_ : Optional[Union[str, bool]] = None UpperCamelCase_ : bool = False UpperCamelCase_ : Optional[Dict] = None UpperCamelCase_ : Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ): return self.__class__(**{k: copy.deepcopy(snake_case__ ) for k, v in self.__dict__.items()} )
674
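# The dataclass above clones itself with
# ``self.__class__(**{k: copy.deepcopy(v) ...})``.  A tiny sketch of why the
# deep copy matters for mutable fields; the class and field names here are
# made up for illustration.
import copy
from dataclasses import dataclass, field


@dataclass
class DownloadOptions:
    retries: int = 1
    headers: dict = field(default_factory=dict)

    def copy(self) -> "DownloadOptions":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})


original = DownloadOptions(headers={"User-Agent": "demo"})
clone = original.copy()
clone.headers["User-Agent"] = "changed"
assert original.headers["User-Agent"] == "demo"  # the original stays untouched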
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ = 50 ): """simple docstring""" lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"{solution() = }")
674
1
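# The solution above counts single-colour tilings of a row using tiles of
# length 2, 3 and 4 (Project Euler 116 style).  An equivalent, more direct
# recurrence: with one tile length ``k``, f(n) = f(n - 1) + f(n - k) counts
# rows built from unit squares and length-k tiles, and f(n) - 1 removes the
# tiling that uses no coloured tile at all.  Function names are illustrative.
def ways_one_colour(length: int, tile: int) -> int:
    f = [1] * (length + 1)
    for n in range(1, length + 1):
        f[n] = f[n - 1] + (f[n - tile] if n >= tile else 0)
    return f[length] - 1  # exclude the all-grey row


def total_ways(length: int = 50) -> int:
    return sum(ways_one_colour(length, tile) for tile in (2, 3, 4))


print(total_ways(5))  # 12 == 7 (red) + 3 (green) + 2 (blue), the classic example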
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : Tuple = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __lowerCAmelCase : Tuple = 25_00_04 __lowerCAmelCase : Any = 25_00_20 @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Dict = MBartTokenizer UpperCamelCase_ : Optional[int] = MBartTokenizerFast UpperCamelCase_ : Tuple = True UpperCamelCase_ : Tuple = True def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = MBartTokenizer(snake_case__ , keep_accents=snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = MBartTokenizer(snake_case__ , keep_accents=snake_case__ ) lowerCAmelCase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(snake_case__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCAmelCase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(snake_case__ ) self.assertListEqual( snake_case__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(snake_case__ ) self.assertListEqual( snake_case__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _SCREAMING_SNAKE_CASE ( self : str ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase__ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = tokenizer_r.save_pretrained(snake_case__ ) lowerCAmelCase__ = tokenizer_p.save_pretrained(snake_case__ 
) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) lowerCAmelCase__ = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way lowerCAmelCase__ = tokenizer_r.from_pretrained(snake_case__ ) lowerCAmelCase__ = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=True lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) lowerCAmelCase__ = tokenizer_p.save_pretrained(snake_case__ ) # Checks it save with the same files self.assertSequenceEqual(snake_case__ , snake_case__ ) # Checks everything loads correctly in the same way lowerCAmelCase__ = tokenizer_r.from_pretrained(snake_case__ ) lowerCAmelCase__ = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) # Save tokenizer rust, legacy_format=False lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = tokenizer_r.save_pretrained(snake_case__ , legacy_format=snake_case__ ) lowerCAmelCase__ = tokenizer_p.save_pretrained(snake_case__ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCAmelCase__ = tokenizer_r.from_pretrained(snake_case__ ) lowerCAmelCase__ = tokenizer_p.from_pretrained(snake_case__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(snake_case__ , snake_case__ ) ) shutil.rmtree(snake_case__ ) @require_torch @require_sentencepiece @require_tokenizers class a_ ( unittest.TestCase ): UpperCamelCase_ : Any = "facebook/mbart-large-en-ro" UpperCamelCase_ : str = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] UpperCamelCase_ : Optional[Any] = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] UpperCamelCase_ : Optional[Any] = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE] @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ): lowerCAmelCase__ = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) lowerCAmelCase__ = 1 return cls def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): 
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): self.assertIn(snake_case__ , self.tokenizer.all_special_ids ) lowerCAmelCase__ = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] lowerCAmelCase__ = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) self.assertNotIn(self.tokenizer.eos_token , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , snake_case__ ) lowerCAmelCase__ = 10 lowerCAmelCase__ = self.tokenizer(snake_case__ , max_length=snake_case__ , truncation=snake_case__ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , snake_case__ ) self.assertEqual(len(snake_case__ ) , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = tempfile.mkdtemp() lowerCAmelCase__ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case__ ) lowerCAmelCase__ = MBartTokenizer.from_pretrained(snake_case__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , snake_case__ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) lowerCAmelCase__ = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case__ , snake_case__ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) lowerCAmelCase__ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , snake_case__ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.tokenizer(self.src_text , padding=snake_case__ , truncation=snake_case__ , max_length=3 , return_tensors="""pt""" ) lowerCAmelCase__ = self.tokenizer( text_target=self.tgt_text , padding=snake_case__ , truncation=snake_case__ , max_length=10 , 
return_tensors="""pt""" ) lowerCAmelCase__ = targets["""input_ids"""] lowerCAmelCase__ = shift_tokens_right(snake_case__ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(snake_case__ ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 250004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250001, } , )
674
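# The tests above exercise MBart's ``shift_tokens_right``: decoder inputs are
# built by rotating the final language-code token to the front rather than
# prepending a fixed start token.  A hedged numpy sketch of that wrap-around
# shift (a simplification of the library routine, names illustrative):
import numpy as np


def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int) -> np.ndarray:
    shifted = input_ids.copy()
    # index of the last non-pad token in each row -- the language code
    last = (input_ids != pad_token_id).sum(axis=1) - 1
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = input_ids[np.arange(input_ids.shape[0]), last]
    return shifted


EOS, EN_XX = 2, 250004
batch = np.array([[62, 3034, EOS, EN_XX]])
print(shift_tokens_right(batch, pad_token_id=1))  # [[250004     62   3034      2]]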
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() __lowerCAmelCase : Any = logging.get_logger(__name__) __lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 1024, """hidden_size""": 768, """max_length""": 512, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 1024, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1e-5, """token_type_vocab_size""": 2, } lowerCAmelCase__ = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowerCAmelCase__ = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" ) lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ ) lowerCAmelCase__ = nlp.model.BERTModel( lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , ) original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ ) lowerCAmelCase__ = original_bort._collect_params_with_prefix() # Build our config 🤗 lowerCAmelCase__ = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": 
predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.02, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(lowerCamelCase__ ), } lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ ) lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase__ ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ): lowerCAmelCase__ = hf_param.shape lowerCAmelCase__ = to_torch(params[gluon_param] ) lowerCAmelCase__ = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers""" return gluon_param lowerCAmelCase__ = check_and_map_params( 
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowerCAmelCase__ = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i] # self attention lowerCAmelCase__ = layer.attention.self lowerCAmelCase__ = check_and_map_params( self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output lowerCAmelCase__ = layer.attention.output lowerCAmelCase__ = check_and_map_params( self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" ) lowerCAmelCase__ = check_and_map_params( self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate lowerCAmelCase__ = layer.intermediate lowerCAmelCase__ = check_and_map_params( intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) lowerCAmelCase__ = check_and_map_params( intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output lowerCAmelCase__ = layer.output lowerCAmelCase__ = check_and_map_params( bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) lowerCAmelCase__ = check_and_map_params( bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" ) lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""] # Get gluon output lowerCAmelCase__ = mx.nd.array([input_ids] ) lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase__ ) lowerCAmelCase__ = 
BertModel.from_pretrained(lowerCamelCase__ ) hf_bort_model.eval() lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" ) lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0] lowerCAmelCase__ = output_gluon[0].asnumpy() lowerCAmelCase__ = output_hf[0].detach().numpy() lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item() lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) if success: print("""✔️ Both model do output the same tensors""" ) else: print("""❌ Both model do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : str = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
674
1
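# The conversion script above routes every weight through a shape-checked
# lookup (``check_and_map_params``).  A framework-agnostic sketch of the same
# pattern with plain torch tensors; all names are illustrative.
import torch
from torch import nn


def copy_param(dst: nn.Parameter, src: torch.Tensor, name: str) -> None:
    assert dst.shape == src.shape, (
        f"{name}: source shape {tuple(src.shape)} does not match "
        f"destination shape {tuple(dst.shape)}"
    )
    with torch.no_grad():
        dst.copy_(src)


layer = nn.Linear(4, 3)
pretrained = {"proj.weight": torch.randn(3, 4), "proj.bias": torch.randn(3)}
copy_param(layer.weight, pretrained["proj.weight"], "proj.weight")
copy_param(layer.bias, pretrained["proj.bias"], "proj.bias")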
"""simple docstring""" import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 __lowerCAmelCase : Optional[int] = 0b101100111110110010010000011110111011000110011110 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 __lowerCAmelCase : str = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class a_ : def __init__( self : Union[str, Any] ): lowerCAmelCase__ = WATERMARK_BITS lowerCAmelCase__ = WatermarkEncoder() self.encoder.set_watermark("""bits""" , self.watermark ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : torch.FloatTensor ): # can't encode images that are smaller than 256 if images.shape[-1] < 256: return images lowerCAmelCase__ = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowerCAmelCase__ = [self.encoder.encode(snake_case__ , """dwtDct""" ) for image in images] lowerCAmelCase__ = torch.from_numpy(np.array(snake_case__ ) ).permute(0 , 3 , 1 , 2 ) lowerCAmelCase__ = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 ) return images
674
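# The class above embeds a 48-bit watermark with ``imwatermark``.  A hedged
# round-trip sketch: decoding should recover the same bits on a clean image.
# ``WatermarkDecoder`` and its ``decode`` signature are assumptions about the
# imwatermark API; adjust if your installed version differs.
import numpy as np
from imwatermark import WatermarkDecoder, WatermarkEncoder

bits = [int(b) for b in bin(0b101100111110110010010000011110111011000110011110)[2:]]
encoder = WatermarkEncoder()
encoder.set_watermark("bits", bits)

image = np.random.randint(0, 256, (512, 512, 3), dtype=np.uint8)
marked = encoder.encode(image, "dwtDct")

decoder = WatermarkDecoder("bits", len(bits))
recovered = decoder.decode(marked, "dwtDct")
print([int(b) for b in recovered] == bits)  # ideally True without compression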
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a_ : def __init__( self : Optional[int] ): lowerCAmelCase__ = """""" lowerCAmelCase__ = """""" lowerCAmelCase__ = [] lowerCAmelCase__ = 0 lowerCAmelCase__ = 256 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ): lowerCAmelCase__ = cva.imread(snake_case__ , 0 ) lowerCAmelCase__ = copy.deepcopy(self.img ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" ) lowerCAmelCase__ = np.sum(snake_case__ ) for i in range(len(snake_case__ ) ): lowerCAmelCase__ = x[i] / self.k self.sk += prk lowerCAmelCase__ = (self.L - 1) * self.sk if self.rem != 0: lowerCAmelCase__ = int(last % last ) lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(snake_case__ ) lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size ) lowerCAmelCase__ = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCAmelCase__ = self.img[j][i] if num != self.last_list[num]: lowerCAmelCase__ = self.last_list[num] cva.imwrite("""output_data/output.jpg""" , self.img ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): plt.hist(self.img.ravel() , 256 , [0, 256] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): cva.imshow("""Output-Image""" , self.img ) cva.imshow("""Input-Image""" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": __lowerCAmelCase : Dict = os.path.join(os.path.basename(__file__), "image_data/input.jpg") __lowerCAmelCase : Optional[int] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
674
1
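# Despite the ``ConstantStretch`` name, the row above performs classic
# histogram equalisation: every grey level is remapped through the scaled
# cumulative distribution of the image histogram.  A compact numpy sketch of
# the same mapping (function name illustrative):
import numpy as np


def equalize(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=levels)
    cdf = hist.cumsum() / img.size  # running P(grey value <= k)
    mapping = np.round((levels - 1) * cdf).astype(img.dtype)
    return mapping[img]  # remap every pixel through the lookup table


low_contrast = np.random.randint(50, 100, (4, 4), dtype=np.uint8)
print(equalize(low_contrast))  # values spread towards the full 0..255 range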
"""simple docstring""" __lowerCAmelCase : Tuple = "Alexander Joslin" import operator as op from .stack import Stack def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub} lowerCAmelCase__ = Stack() lowerCAmelCase__ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(lowerCamelCase__ ) ) elif i in operators: # RULE 2 operator_stack.push(lowerCamelCase__ ) elif i == ")": # RULE 4 lowerCAmelCase__ = operator_stack.peek() operator_stack.pop() lowerCAmelCase__ = operand_stack.peek() operand_stack.pop() lowerCAmelCase__ = operand_stack.peek() operand_stack.pop() lowerCAmelCase__ = operators[opr](lowerCamelCase__ , lowerCamelCase__ ) operand_stack.push(lowerCamelCase__ ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = "(5 + ((4 * 2) * (2 + 3)))" # answer = 45 print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
674
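# The evaluator above reads operands one character at a time, so it only
# handles single-digit numbers.  A hedged variant of the same two-stack idea
# that tokenises multi-digit integers first (names illustrative; it still
# assumes a fully parenthesised expression):
import operator as op


def evaluate(expression: str) -> float:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operands = []
    pending = []
    for token in expression.replace("(", " ( ").replace(")", " ) ").split():
        if token.isdigit():
            operands.append(int(token))
        elif token in operators:
            pending.append(token)
        elif token == ")":
            right, left = operands.pop(), operands.pop()
            operands.append(operators[pending.pop()](left, right))
    return operands[-1]


print(evaluate("(51 + ((4 * 2) * (2 + 3)))"))  # 91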
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class a_ ( __UpperCamelCase ): UpperCamelCase_ : List[str] = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined" UpperCamelCase_ : Any = "image_segmenter" UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation UpperCamelCase_ : List[str] = ["image", "text"] UpperCamelCase_ : int = ["image"] def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ): requires_backends(self , ["""vision"""] ) super().__init__(*snake_case__ , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ): return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" ) def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ): with torch.no_grad(): lowerCAmelCase__ = self.model(**snake_case__ ).logits return logits def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ): lowerCAmelCase__ = outputs.cpu().detach().numpy() lowerCAmelCase__ = 0 lowerCAmelCase__ = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
674
1
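# The tool above wraps CLIPSeg.  A hedged end-to-end sketch with the public
# transformers classes (the checkpoint name comes from the row; the rest is
# standard usage and assumes a recent transformers version):
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.new("RGB", (352, 352), "white")  # stand-in for a real photo
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

mask = (torch.sigmoid(logits) > 0.5).to(torch.uint8) * 255
print(mask.shape)  # a 2-D mask at the model's output resolution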
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path __lowerCAmelCase : Union[str, Any] = [ {"dataset": "wikipedia", "config_name": "20220301.de"}, {"dataset": "wikipedia", "config_name": "20220301.en"}, {"dataset": "wikipedia", "config_name": "20220301.fr"}, {"dataset": "wikipedia", "config_name": "20220301.frr"}, {"dataset": "wikipedia", "config_name": "20220301.it"}, {"dataset": "wikipedia", "config_name": "20220301.simple"}, {"dataset": "snli", "config_name": "plain_text"}, {"dataset": "eli5", "config_name": "LFQA_reddit"}, {"dataset": "wiki40b", "config_name": "en"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"}, {"dataset": "natural_questions", "config_name": "default"}, ] def _UpperCAmelCase ( lowerCamelCase__=True ): """simple docstring""" if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__UpperCamelCase ) ) class a_ ( __UpperCamelCase ): UpperCamelCase_ : Dict = None UpperCamelCase_ : Dict = None def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Any , snake_case__ : Tuple ): with TemporaryDirectory() as tmp_dir: lowerCAmelCase__ = dataset_module_factory(snake_case__ , cache_dir=snake_case__ ) lowerCAmelCase__ = import_main_class(dataset_module.module_path , dataset=snake_case__ ) lowerCAmelCase__ = builder_cls( cache_dir=snake_case__ , config_name=snake_case__ , hash=dataset_module.hash , ) lowerCAmelCase__ = """/""".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=snake_case__ ).replace(os.sep , """/""" ), config.DATASET_INFO_FILENAME, ] ) lowerCAmelCase__ = cached_path(snake_case__ , cache_dir=snake_case__ ) self.assertTrue(os.path.exists(snake_case__ ) ) @pytest.mark.integration def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple""" lowerCAmelCase__ = dataset_module_factory("""wikipedia""" , cache_dir=lowerCamelCase__ ) lowerCAmelCase__ = import_main_class(dataset_module.module_path ) lowerCAmelCase__ = builder_cls( cache_dir=lowerCamelCase__ , config_name="""20220301.frr""" , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam lowerCAmelCase__ = None builder_instance.download_and_prepare() lowerCAmelCase__ = builder_instance.as_dataset() assert ds @pytest.mark.integration def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = dataset_module_factory("""wikipedia""" , cache_dir=lowerCamelCase__ ) lowerCAmelCase__ = import_main_class(dataset_module.module_path , dataset=lowerCamelCase__ ) lowerCAmelCase__ = builder_cls( 
cache_dir=lowerCamelCase__ , config_name="""20220301.frr""" , hash=dataset_module.hash , ) lowerCAmelCase__ = builder_instance.as_streaming_dataset() assert ds assert isinstance(lowerCamelCase__ , lowerCamelCase__ ) assert "train" in ds assert isinstance(ds["""train"""] , lowerCamelCase__ ) assert next(iter(ds["""train"""] ) )
674
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = LayoutLMTokenizer UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast UpperCamelCase_ : Dict = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() lowerCAmelCase__ = [ """[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ): lowerCAmelCase__ = """UNwant\u00E9d,running""" lowerCAmelCase__ = """unwanted, running""" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): pass
674
1
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = [] for data in source_data: for i, el in enumerate(lowerCamelCase__ ): if len(lowerCamelCase__ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(lowerCamelCase__ ) ) return data_lists def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = [] for dlist, weight in zip(lowerCamelCase__ , lowerCamelCase__ ): lowerCAmelCase__ = min(lowerCamelCase__ ) lowerCAmelCase__ = max(lowerCamelCase__ ) lowerCAmelCase__ = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: lowerCAmelCase__ = f"""Invalid weight of {weight:f} provided""" raise ValueError(lowerCamelCase__ ) score_lists.append(lowerCamelCase__ ) return score_lists def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(lowerCamelCase__ ): lowerCAmelCase__ = final_scores[j] + ele return final_scores def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = get_data(lowerCamelCase__ ) lowerCAmelCase__ = calculate_each_score(lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = generate_final_scores(lowerCamelCase__ ) # append scores to source data for i, ele in enumerate(lowerCamelCase__ ): source_data[i].append(lowerCamelCase__ ) return source_data
674
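# The functions above implement weighted min-max scoring: weight 1 rewards
# larger values, weight 0 rewards smaller ones, and the per-column scores are
# summed into a final score appended to each row.  A tiny worked example with
# illustrative variable names:
price = [10.0, 20.0, 30.0]  # weight 0: cheaper is better
rating = [2.0, 4.0, 5.0]  # weight 1: higher is better


def normalise(column, maximise):
    lo, hi = min(column), max(column)
    scaled = [(value - lo) / (hi - lo) for value in column]
    return scaled if maximise else [1 - s for s in scaled]


totals = [p + r for p, r in zip(normalise(price, False), normalise(rating, True))]
print(totals)  # [1.0, 1.166..., 1.0] -> the middle option scores best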
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 __lowerCAmelCase : Any = { # 1536-bit 5: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 2048-bit 14: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 3072-bit 15: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 4096-bit 16: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 6144-bit 17: { "prime": int( 
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 8192-bit 18: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + 
"3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, } class a_ : def __init__( self : List[str] , snake_case__ : int = 14 ): if group not in primes: raise ValueError("""Unsupported Group""" ) lowerCAmelCase__ = primes[group]["""prime"""] lowerCAmelCase__ = primes[group]["""generator"""] lowerCAmelCase__ = int(hexlify(urandom(32 ) ) , base=16 ) def _SCREAMING_SNAKE_CASE ( self : Any ): return hex(self.__private_key )[2:] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = pow(self.generator , self.__private_key , self.prime ) return hex(snake_case__ )[2:] def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str ): lowerCAmelCase__ = int(snake_case__ , base=16 ) if not self.is_valid_public_key(snake_case__ ): raise ValueError("""Invalid public key""" ) lowerCAmelCase__ = pow(snake_case__ , self.__private_key , self.prime ) return shaaaa(str(snake_case__ ).encode() ).hexdigest() @staticmethod def _SCREAMING_SNAKE_CASE ( snake_case__ : int , snake_case__ : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1 ) @staticmethod def _SCREAMING_SNAKE_CASE ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ): lowerCAmelCase__ = int(snake_case__ , base=16 ) lowerCAmelCase__ = int(snake_case__ , base=16 ) lowerCAmelCase__ = primes[group]["""prime"""] if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ): raise ValueError("""Invalid public key""" ) lowerCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ ) return shaaaa(str(snake_case__ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
674
1
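# The class above implements finite-field Diffie-Hellman over the RFC 3526
# MODP groups.  A self-contained sketch of the underlying exchange; the toy
# 64-bit prime (2**64 - 59) and the fixed private keys are for illustration
# only -- a real exchange uses one of the large group primes and 256-bit
# random secrets, as in the row.
from hashlib import sha256

p, g = 2**64 - 59, 2  # toy prime, NOT for real use
alice_secret, bob_secret = 123456789, 987654321

alice_public = pow(g, alice_secret, p)
bob_public = pow(g, bob_secret, p)

shared_alice = pow(bob_public, alice_secret, p)  # (g^b)^a mod p
shared_bob = pow(alice_public, bob_secret, p)  # (g^a)^b mod p
assert shared_alice == shared_bob

print(sha256(str(shared_alice).encode()).hexdigest())  # the derived key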
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml __lowerCAmelCase : Optional[int] = logging.get_logger(__name__) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" def run_func(lowerCamelCase__ ): @wraps(lowerCamelCase__ ) def run_in_eager_mode(*lowerCamelCase__ , **lowerCamelCase__ ): return func(*lowerCamelCase__ , **lowerCamelCase__ ) @wraps(lowerCamelCase__ ) @tf.function(experimental_compile=lowerCamelCase__ ) def run_in_graph_mode(*lowerCamelCase__ , **lowerCamelCase__ ): return func(*lowerCamelCase__ , **lowerCamelCase__ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = random.Random() lowerCAmelCase__ = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowerCamelCase__ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class a_ ( __UpperCamelCase ): UpperCamelCase_ : TensorFlowBenchmarkArguments UpperCamelCase_ : PretrainedConfig UpperCamelCase_ : str = "TensorFlow" @property def _SCREAMING_SNAKE_CASE ( self : Any ): return tf.__version__ def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str , snake_case__ : int , snake_case__ : int ): # initialize GPU on separate process lowerCAmelCase__ = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCAmelCase__ = self._prepare_inference_func(snake_case__ , snake_case__ , snake_case__ ) return self._measure_speed(_inference ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : str , snake_case__ : int , snake_case__ : int ): lowerCAmelCase__ = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCAmelCase__ = self._prepare_train_func(snake_case__ , snake_case__ , snake_case__ ) return self._measure_speed(_train ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str , snake_case__ : int , snake_case__ : int ): # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , snake_case__ ) lowerCAmelCase__ = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCAmelCase__ = self._prepare_inference_func(snake_case__ , snake_case__ , snake_case__ ) return self._measure_memory(_inference ) def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : str , snake_case__ : int , snake_case__ : int ): if self.args.is_gpu: 
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , snake_case__ ) lowerCAmelCase__ = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) lowerCAmelCase__ = self._prepare_train_func(snake_case__ , snake_case__ , snake_case__ ) return self._measure_memory(_train ) def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : str , snake_case__ : int , snake_case__ : int ): lowerCAmelCase__ = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) lowerCAmelCase__ = ( hasattr(snake_case__ , """architectures""" ) and isinstance(config.architectures , snake_case__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCAmelCase__ = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model lowerCAmelCase__ = __import__("""transformers""" , fromlist=[model_class] ) lowerCAmelCase__ = getattr(snake_case__ , snake_case__ ) lowerCAmelCase__ = model_cls(snake_case__ ) except ImportError: raise ImportError( F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: lowerCAmelCase__ = TF_MODEL_MAPPING[config.__class__](snake_case__ ) # encoder-decoder has vocab size saved differently lowerCAmelCase__ = config.vocab_size if hasattr(snake_case__ , """vocab_size""" ) else config.encoder.vocab_size lowerCAmelCase__ = random_input_ids(snake_case__ , snake_case__ , snake_case__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(snake_case__ , decoder_input_ids=snake_case__ , training=snake_case__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(snake_case__ , training=snake_case__ ) lowerCAmelCase__ = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : str , snake_case__ : int , snake_case__ : int ): lowerCAmelCase__ = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) lowerCAmelCase__ = ( hasattr(snake_case__ , """architectures""" ) and isinstance(config.architectures , snake_case__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: lowerCAmelCase__ = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model lowerCAmelCase__ = __import__("""transformers""" , fromlist=[model_class] ) lowerCAmelCase__ = getattr(snake_case__ , snake_case__ ) lowerCAmelCase__ = model_cls(snake_case__ ) except ImportError: raise ImportError( F"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: lowerCAmelCase__ = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](snake_case__ ) # encoder-decoder has vocab size saved differently lowerCAmelCase__ = config.vocab_size if hasattr(snake_case__ , """vocab_size""" ) else config.encoder.vocab_size lowerCAmelCase__ = random_input_ids(snake_case__ , snake_case__ , snake_case__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): lowerCAmelCase__ = model(snake_case__ , decoder_input_ids=snake_case__ , labels=snake_case__ , training=snake_case__ )[0] lowerCAmelCase__ = tf.gradients(snake_case__ , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ , training=snake_case__ )[0] lowerCAmelCase__ = tf.gradients(snake_case__ , model.trainable_variables ) return gradients lowerCAmelCase__ = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Union[str, Any] ): with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(snake_case__ , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average lowerCAmelCase__ = timeit.repeat( snake_case__ , repeat=self.args.repeat , number=10 , ) return min(snake_case__ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(F"""Doesn't fit on GPU. {e}""" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Callable[[], None] ): logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) lowerCAmelCase__ = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won't log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) lowerCAmelCase__ = """N/A""" else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() lowerCAmelCase__ = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) lowerCAmelCase__ = nvml.nvmlDeviceGetMemoryInfo(snake_case__ ) lowerCAmelCase__ = meminfo.used lowerCAmelCase__ = Memory(snake_case__ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) lowerCAmelCase__ = None else: lowerCAmelCase__ = measure_peak_memory_cpu(snake_case__ ) lowerCAmelCase__ = Memory(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else memory_bytes if self.args.trace_memory_line_by_line: lowerCAmelCase__ = stop_memory_tracing(snake_case__ ) if memory is None: lowerCAmelCase__ = summary.total else: lowerCAmelCase__ = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F"""Doesn't fit on GPU. {e}""" ) return "N/A", None
674
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ): """simple docstring""" assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match""" lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match""" lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = np.asarray(weights[0] ) lowerCAmelCase__ = np.asarray(weights[1] ) lowerCAmelCase__ = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = np.asarray(weights[0] ) lowerCAmelCase__ = np.asarray(weights[1] ) lowerCAmelCase__ = np.asarray(weights[2] ) lowerCAmelCase__ = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = weights[0][0][0] lowerCAmelCase__ = np.asarray(layer_norm_a[0] ) lowerCAmelCase__ = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # lsh weights + output lowerCAmelCase__ = weights[0][1] if len(lowerCamelCase__ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ ) else: set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ ) # intermediate weighs lowerCAmelCase__ = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase__ ) == 4: lowerCAmelCase__ = intermediate_weights[2] # layernorm 2 lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] ) lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # intermediate dense lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] ) lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) # intermediate out lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] 
) lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = torch_model.reformer # word embeds lowerCAmelCase__ = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , ) if isinstance(weights[3] , lowerCamelCase__ ): lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f"""{position_embeddings[emb_idx]} emb does not match""" lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) ) lowerCAmelCase__ = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # output layer norm lowerCAmelCase__ = np.asarray(weights[7][0] ) lowerCAmelCase__ = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # output embeddings lowerCAmelCase__ = np.asarray(weights[9][0] ) lowerCAmelCase__ = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = ReformerConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ = ReformerModelWithLMHead(lowerCamelCase__ ) with open(lowerCamelCase__ , """rb""" ) as f: lowerCAmelCase__ = pickle.load(lowerCamelCase__ )["""weights"""] set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
674
1
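# A minimal sketch of the eager-vs-graph dispatch used by the benchmark file
# above, assuming TensorFlow is installed. The matmul workload and its sizes
# are illustrative placeholders, not part of the original benchmark.
import timeit

import tensorflow as tf


def make_runner(use_graph: bool):
    def work():
        x = tf.random.uniform((128, 128))
        return tf.matmul(x, x)

    # tf.function compiles the Python callable into a graph; eager mode runs it as-is
    return tf.function(work) if use_graph else work


for use_graph in (False, True):
    runner = make_runner(use_graph)
    runner()  # warm-up call so graph compilation is not included in the timing
    # timeit.repeat returns one total per repeat; take the min, as the benchmark does
    print(use_graph, min(timeit.repeat(runner, repeat=3, number=10)) / 10.0)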
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __lowerCAmelCase : Any = abspath(join(dirname(dirname(dirname(__file__))), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main lowerCAmelCase__ = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(lowerCamelCase__ , id=lowerCamelCase__ )
674
"""simple docstring""" import os from math import logaa def _UpperCAmelCase ( lowerCamelCase__ = "base_exp.txt" ): """simple docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) ): lowerCAmelCase__ , lowerCAmelCase__ = list(map(lowerCamelCase__ , line.split(""",""" ) ) ) if x * logaa(lowerCamelCase__ ) > largest: lowerCAmelCase__ = x * logaa(lowerCamelCase__ ) lowerCAmelCase__ = i + 1 return result if __name__ == "__main__": print(solution())
674
1
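# A short check of the logarithm trick used in the base/exponent solution
# above: ranking by exponent * log10(base) agrees with ranking by the full
# power. The sample pairs below are illustrative, not taken from base_exp.txt.
from math import log10

pairs = [(2, 11), (3, 7), (5, 5)]
by_log = max(pairs, key=lambda p: p[1] * log10(p[0]))
by_power = max(pairs, key=lambda p: p[0] ** p[1])
assert by_log == by_power
print(by_log)  # (5, 5), since 5**5 = 3125 is the largest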
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
674
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" while b: lowerCAmelCase__ , lowerCAmelCase__ = b, a % b return a def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return a if b == 0 else euclidean_gcd_recursive(lowerCamelCase__ , a % b ) def _UpperCAmelCase ( ): """simple docstring""" print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" ) print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" ) print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" ) print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" ) print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" ) print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" ) print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" ) print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" ) if __name__ == "__main__": main()
674
1
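# A quick sanity sketch for the two gcd variants above: both should agree with
# math.gcd on random inputs (using the corrected recursive form gcd(b, a % b)).
import math
import random


def euclidean_gcd(a, b):
    while b:
        a, b = b, a % b
    return a


for _ in range(100):
    a, b = random.randint(1, 10**6), random.randint(1, 10**6)
    assert euclidean_gcd(a, b) == math.gcd(a, b)
print("gcd checks passed")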
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCAmelCase : List[str] = logging.get_logger(__name__) __lowerCAmelCase : List[str] = { "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json", } class a_ ( __UpperCamelCase ): UpperCamelCase_ : Dict = "instructblip_vision_model" def __init__( self : Optional[int] , snake_case__ : Optional[Any]=1408 , snake_case__ : List[str]=6144 , snake_case__ : Optional[Any]=39 , snake_case__ : Optional[int]=16 , snake_case__ : List[str]=224 , snake_case__ : Any=14 , snake_case__ : Any="gelu" , snake_case__ : List[str]=1E-6 , snake_case__ : List[str]=0.0 , snake_case__ : Optional[int]=1E-10 , snake_case__ : int=True , **snake_case__ : int , ): super().__init__(**snake_case__ ) lowerCAmelCase__ = hidden_size lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = patch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = attention_dropout lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = hidden_act lowerCAmelCase__ = qkv_bias @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Dict ): cls._set_token_in_kwargs(snake_case__ ) lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": lowerCAmelCase__ = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class a_ ( __UpperCamelCase ): UpperCamelCase_ : int = "instructblip_qformer" def __init__( self : Tuple , snake_case__ : Optional[int]=30522 , snake_case__ : Dict=768 , snake_case__ : Dict=12 , snake_case__ : Dict=12 , snake_case__ : List[Any]=3072 , snake_case__ : Any="gelu" , snake_case__ : str=0.1 , snake_case__ : int=0.1 , snake_case__ : Any=512 , snake_case__ : List[str]=0.02 , snake_case__ : Any=1E-12 , snake_case__ : Optional[int]=0 , snake_case__ : str="absolute" , snake_case__ : Any=2 , snake_case__ : Any=1408 , **snake_case__ : Any , ): super().__init__(pad_token_id=snake_case__ , **snake_case__ ) lowerCAmelCase__ = vocab_size lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = hidden_act lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = max_position_embeddings lowerCAmelCase__ = initializer_range lowerCAmelCase__ = layer_norm_eps lowerCAmelCase__ = position_embedding_type lowerCAmelCase__ = cross_attention_frequency lowerCAmelCase__ = encoder_hidden_size @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any , snake_case__ : Union[str, os.PathLike] , **snake_case__ : List[str] ): cls._set_token_in_kwargs(snake_case__ ) lowerCAmelCase__ , lowerCAmelCase__ = cls.get_config_dict(snake_case__ , **snake_case__ ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": lowerCAmelCase__ = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class a_ ( __UpperCamelCase ): UpperCamelCase_ : Union[str, Any] = "instructblip" UpperCamelCase_ : Optional[Any] = True def __init__( self : Dict , snake_case__ : str=None , snake_case__ : str=None , snake_case__ : Optional[int]=None , snake_case__ : List[str]=32 , **snake_case__ : int ): super().__init__(**snake_case__ ) if vision_config is None: lowerCAmelCase__ = {} logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" ) if qformer_config is None: lowerCAmelCase__ = {} logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" ) if text_config is None: lowerCAmelCase__ = {} logger.info("""text_config is None. 
Initializing the text config with default values (`OPTConfig`).""" ) lowerCAmelCase__ = InstructBlipVisionConfig(**snake_case__ ) lowerCAmelCase__ = InstructBlipQFormerConfig(**snake_case__ ) lowerCAmelCase__ = text_config["""model_type"""] if """model_type""" in text_config else """opt""" lowerCAmelCase__ = CONFIG_MAPPING[text_model_type](**snake_case__ ) lowerCAmelCase__ = self.text_config.tie_word_embeddings lowerCAmelCase__ = self.text_config.is_encoder_decoder lowerCAmelCase__ = num_query_tokens lowerCAmelCase__ = self.vision_config.hidden_size lowerCAmelCase__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowerCAmelCase__ = 1.0 lowerCAmelCase__ = 0.02 @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] , snake_case__ : InstructBlipVisionConfig , snake_case__ : InstructBlipQFormerConfig , snake_case__ : PretrainedConfig , **snake_case__ : Union[str, Any] , ): return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **snake_case__ , ) def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = copy.deepcopy(self.__dict__ ) lowerCAmelCase__ = self.vision_config.to_dict() lowerCAmelCase__ = self.qformer_config.to_dict() lowerCAmelCase__ = self.text_config.to_dict() lowerCAmelCase__ = self.__class__.model_type return output
674
"""simple docstring""" import os def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = os.path.dirname(os.path.realpath(lowerCamelCase__ ) ) lowerCAmelCase__ = os.path.join(lowerCamelCase__ , """triangle.txt""" ) with open(lowerCamelCase__ ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = [] for line in triangle: lowerCAmelCase__ = [] for number in line.strip().split(""" """ ): numbers_from_line.append(int(lowerCamelCase__ ) ) a.append(lowerCamelCase__ ) for i in range(1 , len(lowerCamelCase__ ) ): for j in range(len(a[i] ) ): lowerCAmelCase__ = a[i - 1][j] if j != len(a[i - 1] ) else 0 lowerCAmelCase__ = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(lowerCamelCase__ , lowerCamelCase__ ) return max(a[-1] ) if __name__ == "__main__": print(solution())
674
1
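# A worked example of the bottom-up maximum path sum used above, run on a
# small in-memory triangle instead of triangle.txt.
triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]
for i in range(1, len(triangle)):
    for j in range(len(triangle[i])):
        left = triangle[i - 1][j - 1] if j > 0 else 0
        right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
        triangle[i][j] += max(left, right)
print(max(triangle[-1]))  # 23 for this classic Project Euler 18 sample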
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
674
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __lowerCAmelCase : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: __lowerCAmelCase : Optional[int] = json.load(f) @require_torch class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict ): return FSMTTokenizer.from_pretrained(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ): lowerCAmelCase__ = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Any , snake_case__ : int ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowerCAmelCase__ = F"""facebook/wmt19-{pair}""" lowerCAmelCase__ = self.get_tokenizer(snake_case__ ) lowerCAmelCase__ = self.get_model(snake_case__ ) lowerCAmelCase__ = bleu_data[pair]["""src"""] lowerCAmelCase__ = bleu_data[pair]["""tgt"""] lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""pt""" , truncation=snake_case__ , padding="""longest""" ).to(snake_case__ ) lowerCAmelCase__ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowerCAmelCase__ = tokenizer.batch_decode( snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) lowerCAmelCase__ = calculate_bleu(snake_case__ , snake_case__ ) print(snake_case__ ) self.assertGreaterEqual(scores["""bleu"""] , snake_case__ )
674
1
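# A minimal translation sketch with one of the FSMT checkpoints the test above
# evaluates. This downloads a model from the Hub, so it is illustrative rather
# than something the test itself runs; the input sentence is a placeholder.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

batch = tokenizer(["Machine learning is great."], return_tensors="pt")
outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))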
"""simple docstring""" import pprint import requests __lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api" def _UpperCAmelCase ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/today""" ).json() def _UpperCAmelCase ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = random_quotes() pprint.pprint(response)
674
"""simple docstring""" import pprint import requests __lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api" def _UpperCAmelCase ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/today""" ).json() def _UpperCAmelCase ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = random_quotes() pprint.pprint(response)
674
1
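# A slightly hardened variant of the quote fetch above: a timeout and a status
# check keep a flaky network call from hanging or silently returning an error page.
import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def random_quotes(timeout: float = 10.0) -> list:
    response = requests.get(API_ENDPOINT_URL + "/random", timeout=timeout)
    response.raise_for_status()  # raise on 4xx/5xx instead of parsing bad JSON
    return response.json()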
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( UniSpeechConfig, UniSpeechForCTC, UniSpeechForPreTraining, WavaVecaFeatureExtractor, WavaVecaPhonemeCTCTokenizer, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowerCAmelCase : Dict = logging.get_logger(__name__) __lowerCAmelCase : Optional[int] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } __lowerCAmelCase : Optional[int] = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" for attribute in key.split(""".""" ): if is_finetuned: if attribute in ["quantizer", "project_q", "project_hid"]: # those layers are only relevant for pretraining and should be dropped return if attribute == "ctc_proj": # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models lowerCAmelCase__ = """lm_head""" lowerCAmelCase__ = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: lowerCAmelCase__ = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: lowerCAmelCase__ = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowerCAmelCase__ = value elif weight_type == "weight_g": lowerCAmelCase__ = value elif weight_type == "weight_v": lowerCAmelCase__ = value elif weight_type == "bias": lowerCAmelCase__ = value else: lowerCAmelCase__ = value logger.info(f"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = [] lowerCAmelCase__ = fairseq_model.state_dict() lowerCAmelCase__ = hf_model.unispeech.feature_extractor for name, value in fairseq_dict.items(): lowerCAmelCase__ = False if "conv_layers" in name: load_conv_layer( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , ) lowerCAmelCase__ = True else: for key, mapped_key in MAPPING.items(): lowerCAmelCase__ = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowerCAmelCase__ = True if "*" in mapped_key: lowerCAmelCase__ = name.split(lowerCamelCase__ )[0].split(""".""" )[-2] lowerCAmelCase__ = mapped_key.replace("""*""" , lowerCamelCase__ ) if "weight_g" in name: lowerCAmelCase__ = """weight_g""" elif "weight_v" in name: lowerCAmelCase__ = """weight_v""" elif "bias" in name: lowerCAmelCase__ = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj lowerCAmelCase__ = """weight""" else: lowerCAmelCase__ = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = full_name.split("""conv_layers.""" )[-1] lowerCAmelCase__ = name.split(""".""" ) lowerCAmelCase__ = int(items[0] ) lowerCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowerCAmelCase__ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowerCAmelCase__ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) lowerCAmelCase__ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowerCAmelCase__ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCamelCase__ ) @torch.no_grad() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ): """simple docstring""" if config_path is not None: lowerCAmelCase__ = UniSpeechConfig.from_pretrained(lowerCamelCase__ ) else: lowerCAmelCase__ = UniSpeechConfig() if is_finetuned: if dict_path: lowerCAmelCase__ = Dictionary.load_from_json(lowerCamelCase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowerCAmelCase__ = target_dict.pad_index lowerCAmelCase__ = target_dict.bos_index lowerCAmelCase__ = target_dict.eos_index lowerCAmelCase__ = len(target_dict.symbols ) lowerCAmelCase__ = os.path.join(lowerCamelCase__ , """vocab.json""" ) if not os.path.isdir(lowerCamelCase__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase__ ) ) return os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ ) lowerCAmelCase__ = target_dict.indices # fairseq has the <pad> and <s> switched lowerCAmelCase__ = 42 lowerCAmelCase__ = 43 with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = WavaVecaPhonemeCTCTokenizer( lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase__ , ) lowerCAmelCase__ = True if config.feat_extract_norm == """layer""" else False lowerCAmelCase__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ) lowerCAmelCase__ = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) lowerCAmelCase__ = UniSpeechForCTC(lowerCamelCase__ ) else: lowerCAmelCase__ = UniSpeechForPreTraining(lowerCamelCase__ ) if is_finetuned: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} ) else: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) lowerCAmelCase__ = model[0].eval() recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) hf_unispeech.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, 
type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) __lowerCAmelCase : str = parser.parse_args() convert_unispeech_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
674
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = 0 def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = CLIPConfig() # Create a dummy config file with image_proceesor_type lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict() config_dict.pop("""image_processor_type""" ) lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ ) # save in new folder model_config.save_pretrained(snake_case__ ) config.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) # make sure private variable is not incorrectly saved lowerCAmelCase__ = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": 
"""CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): with self.assertRaisesRegex( snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): with self.assertRaisesRegex( snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): with self.assertRaisesRegex( snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(snake_case__ ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case__ ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def _SCREAMING_SNAKE_CASE ( self : Dict ): try: AutoConfig.register("""custom""" , snake_case__ ) AutoImageProcessor.register(snake_case__ , snake_case__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoImageProcessor.register(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _SCREAMING_SNAKE_CASE ( self : List[str] ): class a_ ( __UpperCamelCase ): UpperCamelCase_ : Tuple = True try: AutoConfig.register("""custom""" , snake_case__ ) AutoImageProcessor.register(snake_case__ , snake_case__ ) # If remote code is not set, the default is to use local lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(snake_case__ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
674
1
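# A condensed sketch of the register-then-roundtrip pattern exercised by the
# AutoImageProcessor tests above. CustomConfig and CustomImageProcessor stand
# in for any user-defined classes; the save/reload behavior mirrors the test.
import tempfile

from transformers import AutoConfig, AutoImageProcessor
from transformers.configuration_utils import PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor


class CustomConfig(PretrainedConfig):
    model_type = "custom"


class CustomImageProcessor(BaseImageProcessor):
    pass


AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)

processor = CustomImageProcessor()
with tempfile.TemporaryDirectory() as tmp_dir:
    processor.save_pretrained(tmp_dir)
    reloaded = AutoImageProcessor.from_pretrained(tmp_dir)
    print(type(reloaded).__name__)  # CustomImageProcessor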
"""simple docstring""" import os def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = os.path.join(os.path.dirname(lowerCamelCase__ ) , """num.txt""" ) with open(lowerCamelCase__ ) as file_hand: return str(sum(int(lowerCamelCase__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
674
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a_ : def __init__( self : Optional[int] , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=64 , snake_case__ : Any=None ): lowerCAmelCase__ = np.random.default_rng(snake_case__ ) lowerCAmelCase__ = length lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Optional[Any] ): return self.length def __getitem__( self : List[str] , snake_case__ : Optional[int] ): return {"x": self.x[i], "y": self.y[i]} class a_ ( torch.nn.Module ): def __init__( self : List[str] , snake_case__ : str=0 , snake_case__ : Dict=0 , snake_case__ : Any=False ): super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = True def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Any=None ): if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a[0] + self.b[0] class a_ ( torch.nn.Module ): def __init__( self : Any , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=0 , snake_case__ : List[Any]=False ): super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) lowerCAmelCase__ = True def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any]=None ): if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a + self.b def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 16 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase__ = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""} lowerCAmelCase__ = load_dataset("""csv""" , data_files=lowerCamelCase__ ) lowerCAmelCase__ = datasets["""train"""].unique("""label""" ) lowerCAmelCase__ = {v: i for i, v in enumerate(lowerCamelCase__ )} def tokenize_function(lowerCamelCase__ ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="""max_length""" ) if "label" in examples: lowerCAmelCase__ = [label_to_id[l] for l in examples["""label"""]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(lowerCamelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowerCAmelCase__ = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=2 ) lowerCAmelCase__ = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
674
1
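# A small sketch using the synthetic regression dataset defined above with a
# plain PyTorch loop, assuming the reconstructed RegressionDataset and
# RegressionModel names.
import torch
from torch.utils.data import DataLoader

dataset = RegressionDataset(a=2, b=3, length=64, seed=0)
loader = DataLoader(dataset, batch_size=16, shuffle=True)

model = RegressionModel(a=0, b=0)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for batch in loader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
    loss.backward()
    optimizer.step()
print(model.a.item(), model.b.item())  # should drift toward 2 and 3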
"""simple docstring""" from __future__ import annotations from random import random from typing import Generic, TypeVar __lowerCAmelCase : Union[str, Any] = TypeVar("KT") __lowerCAmelCase : Optional[int] = TypeVar("VT") class a_ ( Generic[KT, VT] ): def __init__( self : int , snake_case__ : KT | str = "root" , snake_case__ : VT | None = None ): lowerCAmelCase__ = key lowerCAmelCase__ = value lowerCAmelCase__ = [] def __repr__( self : str ): return F"""Node({self.key}: {self.value})""" @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return len(self.forward ) class a_ ( Generic[KT, VT] ): def __init__( self : int , snake_case__ : float = 0.5 , snake_case__ : int = 16 ): lowerCAmelCase__ = Node[KT, VT]() lowerCAmelCase__ = 0 lowerCAmelCase__ = p lowerCAmelCase__ = max_level def __str__( self : str ): lowerCAmelCase__ = list(self ) if len(snake_case__ ) == 0: return F"""SkipList(level={self.level})""" lowerCAmelCase__ = max((len(str(snake_case__ ) ) for item in items) , default=4 ) lowerCAmelCase__ = max(snake_case__ , 4 ) + 4 lowerCAmelCase__ = self.head lowerCAmelCase__ = [] lowerCAmelCase__ = node.forward.copy() lines.append(F"""[{node.key}]""".ljust(snake_case__ , """-""" ) + """* """ * len(snake_case__ ) ) lines.append(""" """ * label_size + """| """ * len(snake_case__ ) ) while len(node.forward ) != 0: lowerCAmelCase__ = node.forward[0] lines.append( F"""[{node.key}]""".ljust(snake_case__ , """-""" ) + """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) ) lines.append(""" """ * label_size + """| """ * len(snake_case__ ) ) lowerCAmelCase__ = node.forward lines.append("""None""".ljust(snake_case__ ) + """* """ * len(snake_case__ ) ) return F"""SkipList(level={self.level})\n""" + "\n".join(snake_case__ ) def __iter__( self : Dict ): lowerCAmelCase__ = self.head while len(node.forward ) != 0: yield node.forward[0].key lowerCAmelCase__ = node.forward[0] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = 1 while random() < self.p and level < self.max_level: level += 1 return level def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : List[str] ): lowerCAmelCase__ = [] lowerCAmelCase__ = self.head for i in reversed(range(self.level ) ): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: lowerCAmelCase__ = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(snake_case__ ) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. if len(node.forward ) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : KT ): lowerCAmelCase__ , lowerCAmelCase__ = self._locate_node(snake_case__ ) if node is not None: for i, update_node in enumerate(snake_case__ ): # Remove or replace all references to removed node. 
if update_node.level > i and update_node.forward[i].key == key: if node.level > i: lowerCAmelCase__ = node.forward[i] else: lowerCAmelCase__ = update_node.forward[:i] def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : KT , snake_case__ : VT ): lowerCAmelCase__ , lowerCAmelCase__ = self._locate_node(snake_case__ ) if node is not None: lowerCAmelCase__ = value else: lowerCAmelCase__ = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , snake_case__ ): update_vector.append(self.head ) lowerCAmelCase__ = level lowerCAmelCase__ = Node(snake_case__ , snake_case__ ) for i, update_node in enumerate(update_vector[:level] ): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i] ) if update_node.level < i + 1: update_node.forward.append(snake_case__ ) else: lowerCAmelCase__ = new_node def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : VT ): lowerCAmelCase__ , lowerCAmelCase__ = self._locate_node(snake_case__ ) if node is not None: return node.value return None def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = SkipList() skip_list.insert("""Key1""" , 3 ) skip_list.insert("""Key2""" , 12 ) skip_list.insert("""Key3""" , 41 ) skip_list.insert("""Key4""" , -19 ) lowerCAmelCase__ = skip_list.head lowerCAmelCase__ = {} while node.level != 0: lowerCAmelCase__ = node.forward[0] lowerCAmelCase__ = node.value assert len(lowerCamelCase__ ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = SkipList() skip_list.insert("""Key1""" , 10 ) skip_list.insert("""Key1""" , 12 ) skip_list.insert("""Key5""" , 7 ) skip_list.insert("""Key7""" , 10 ) skip_list.insert("""Key10""" , 5 ) skip_list.insert("""Key7""" , 7 ) skip_list.insert("""Key5""" , 5 ) skip_list.insert("""Key10""" , 10 ) lowerCAmelCase__ = skip_list.head lowerCAmelCase__ = {} while node.level != 0: lowerCAmelCase__ = node.forward[0] lowerCAmelCase__ = node.value if len(lowerCamelCase__ ) != 4: print() assert len(lowerCamelCase__ ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = SkipList() assert skip_list.find("""Some key""" ) is None def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = SkipList() skip_list.insert("""Key2""" , 20 ) assert skip_list.find("""Key2""" ) == 20 skip_list.insert("""Some Key""" , 10 ) skip_list.insert("""Key2""" , 8 ) skip_list.insert("""V""" , 13 ) assert skip_list.find("""Y""" ) is None assert skip_list.find("""Key2""" ) == 8 assert skip_list.find("""Some Key""" ) == 10 assert skip_list.find("""V""" ) == 13 def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = SkipList() skip_list.delete("""Some key""" ) assert len(skip_list.head.forward ) == 0 def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 14 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""V""" ) skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""Key2""" ) is None def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = SkipList() skip_list.insert("""Key1""" , 12 ) 
skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 14 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""V""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) == 14 assert skip_list.find("""Key1""" ) == 12 assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""X""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) == 12 assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""Key1""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) is None def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 142 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""X""" ) def traverse_keys(lowerCamelCase__ ): yield node.key for forward_node in node.forward: yield from traverse_keys(lowerCamelCase__ ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def _UpperCAmelCase ( ): """simple docstring""" def is_sorted(lowerCamelCase__ ): return all(next_item >= item for item, next_item in zip(lowerCamelCase__ , lst[1:] ) ) lowerCAmelCase__ = SkipList() for i in range(10 ): skip_list.insert(lowerCamelCase__ , lowerCamelCase__ ) assert is_sorted(list(lowerCamelCase__ ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(lowerCamelCase__ ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(lowerCamelCase__ ) ) def _UpperCAmelCase ( ): """simple docstring""" for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = SkipList() skip_list.insert(2 , """2""" ) skip_list.insert(4 , """4""" ) skip_list.insert(6 , """4""" ) skip_list.insert(4 , """5""" ) skip_list.insert(8 , """4""" ) skip_list.insert(9 , """4""" ) skip_list.delete(4 ) print(lowerCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
674
"""simple docstring""" import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = MobileBertConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ = MobileBertForPreTraining(lowerCamelCase__ ) # Load weights from tf checkpoint lowerCAmelCase__ = load_tf_weights_in_mobilebert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : Optional[int] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
674
1
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ = 50 ): """simple docstring""" lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"{solution() = }")
674
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: lowerCAmelCase__ = f"""The input value of [n={number}] has to be > 0""" raise ValueError(lowerCamelCase__ ) else: lowerCAmelCase__ = sylvester(number - 1 ) lowerCAmelCase__ = num - 1 lowerCAmelCase__ = num return lower * upper + 1 if __name__ == "__main__": print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
674
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowerCAmelCase : str = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} __lowerCAmelCase : Dict = { "tokenizer_file": { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json", }, } __lowerCAmelCase : List[Any] = { "gpt-neox-20b": 20_48, } class a_ ( _lowerCamelCase ): UpperCamelCase_ : Dict = VOCAB_FILES_NAMES UpperCamelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Tuple=None , snake_case__ : Optional[int]="<|endoftext|>" , snake_case__ : Any="<|endoftext|>" , snake_case__ : Optional[Any]="<|endoftext|>" , snake_case__ : int=False , **snake_case__ : Optional[int] , ): super().__init__( A__ , A__ , tokenizer_file=A__ , unk_token=A__ , bos_token=A__ , eos_token=A__ , add_prefix_space=A__ , **A__ , ) lowerCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , A__ ) != add_prefix_space: lowerCAmelCase__ = getattr(A__ , pre_tok_state.pop("""type""" ) ) lowerCAmelCase__ = add_prefix_space lowerCAmelCase__ = pre_tok_class(**A__ ) lowerCAmelCase__ = add_prefix_space def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Optional[Any] , snake_case__ : Dict = None ): lowerCAmelCase__ = self._tokenizer.model.save(A__ , name=A__ ) return tuple(A__ ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Union[str, Any] ): lowerCAmelCase__ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(A__ , add_special_tokens=A__ ) + [self.eos_token_id] ) if len(A__ ) > self.model_max_length: lowerCAmelCase__ = input_ids[-self.model_max_length :] return input_ids
700
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Tuple = PegasusTokenizer UpperCamelCase_ : Any = PegasusTokenizerFast UpperCamelCase_ : int = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return PegasusTokenizer.from_pretrained("""google/pegasus-large""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = """</s>""" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """</s>""" ) self.assertEqual(vocab_keys[-1] , """v""" ) self.assertEqual(len(snake_case__ ) , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ = 
"""To ensure a smooth flow of bank resolutions.""" lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : str ): # fmt: off lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = PegasusTokenizer UpperCamelCase_ : Optional[int] = PegasusTokenizerFast UpperCamelCase_ : Union[str, Any] = True UpperCamelCase_ : Optional[int] = True def _SCREAMING_SNAKE_CASE ( 
self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ): return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids self.assertListEqual( snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
674
0
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class a_ ( _snake_case ): UpperCamelCase_ : Optional[int] = ["image_processor", "tokenizer"] UpperCamelCase_ : List[str] = "AutoImageProcessor" UpperCamelCase_ : Optional[int] = "AutoTokenizer" def __init__( self : Tuple , snake_case__ : Tuple , snake_case__ : str ): super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) lowerCAmelCase__ = self.image_processor def __call__( self : List[Any] , snake_case__ : int=None , snake_case__ : Any=None , snake_case__ : Optional[Any]=None , **snake_case__ : int ): if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: lowerCAmelCase__ = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) if images is not None: lowerCAmelCase__ = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) if text is not None and images is not None: lowerCAmelCase__ = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , *snake_case__ : int , **snake_case__ : List[str] ): return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) def _SCREAMING_SNAKE_CASE ( self : int , *snake_case__ : Tuple , **snake_case__ : Optional[Any] ): return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ): return ["input_ids", "attention_mask", "pixel_values"]
701
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : int ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , ) assert hasattr(self , """env""" ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ): # configuration for running training on smdistributed Model Parallel lowerCAmelCase__ = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase__ = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ): TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ): # create estimator lowerCAmelCase__ = self.create_estimator(snake_case__ ) # run training estimator.fit() # result dataframe lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] 
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
674
0
"""simple docstring""" from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar __lowerCAmelCase = TypeVar("T") def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return (position - 1) // 2 def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return (2 * position) + 1 def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" return (2 * position) + 2 class a_ ( Generic[T] ): def __init__( self : Optional[Any] ): lowerCAmelCase__ = [] lowerCAmelCase__ = {} lowerCAmelCase__ = 0 def __len__( self : Dict ): return self.elements def __repr__( self : Dict ): return str(self.heap ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): return self.elements == 0 def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : T , snake_case__ : int ): self.heap.append((elem, weight) ) lowerCAmelCase__ = self.elements self.elements += 1 self._bubble_up(UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) lowerCAmelCase__ , lowerCAmelCase__ = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: lowerCAmelCase__ , lowerCAmelCase__ = self.heap[0] self._bubble_down(UpperCAmelCase_ ) return elem def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : T , snake_case__ : int ): lowerCAmelCase__ = self.position_map[elem] lowerCAmelCase__ = (elem, weight) if position > 0: lowerCAmelCase__ = get_parent_position(UpperCAmelCase_ ) lowerCAmelCase__ , lowerCAmelCase__ = self.heap[parent_position] if parent_weight > weight: self._bubble_up(UpperCAmelCase_ ) else: self._bubble_down(UpperCAmelCase_ ) else: self._bubble_down(UpperCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : T ): lowerCAmelCase__ = self.position_map[elem] if curr_pos == 0: return None lowerCAmelCase__ = get_parent_position(UpperCAmelCase_ ) lowerCAmelCase__ , lowerCAmelCase__ = self.heap[curr_pos] lowerCAmelCase__ , lowerCAmelCase__ = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(UpperCAmelCase_ , UpperCAmelCase_ ) return self._bubble_up(UpperCAmelCase_ ) return None def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : T ): lowerCAmelCase__ = self.position_map[elem] lowerCAmelCase__ , lowerCAmelCase__ = self.heap[curr_pos] lowerCAmelCase__ = get_child_left_position(UpperCAmelCase_ ) lowerCAmelCase__ = get_child_right_position(UpperCAmelCase_ ) if child_left_position < self.elements and child_right_position < self.elements: lowerCAmelCase__ , lowerCAmelCase__ = self.heap[child_left_position] lowerCAmelCase__ , lowerCAmelCase__ = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(UpperCAmelCase_ , UpperCAmelCase_ ) return self._bubble_down(UpperCAmelCase_ ) if child_left_position < self.elements: lowerCAmelCase__ , lowerCAmelCase__ = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(UpperCAmelCase_ , UpperCAmelCase_ ) return self._bubble_down(UpperCAmelCase_ ) else: return None if child_right_position < self.elements: lowerCAmelCase__ , lowerCAmelCase__ = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(UpperCAmelCase_ , UpperCAmelCase_ ) return self._bubble_down(UpperCAmelCase_ ) return None def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : int , snake_case__ : int ): lowerCAmelCase__ = self.heap[nodea_pos][0] lowerCAmelCase__ = self.heap[nodea_pos][0] lowerCAmelCase__ , 
lowerCAmelCase__ = ( self.heap[nodea_pos], self.heap[nodea_pos], ) lowerCAmelCase__ = nodea_pos lowerCAmelCase__ = nodea_pos class a_ ( Generic[T] ): def __init__( self : Any ): lowerCAmelCase__ = {} lowerCAmelCase__ = 0 def __repr__( self : Tuple ): return str(self.connections ) def __len__( self : Dict ): return self.nodes def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : T ): if node not in self.connections: lowerCAmelCase__ = {} self.nodes += 1 def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : T , snake_case__ : T , snake_case__ : int ): self.add_node(UpperCAmelCase_ ) self.add_node(UpperCAmelCase_ ) lowerCAmelCase__ = weight lowerCAmelCase__ = weight def _UpperCAmelCase ( lowerCamelCase__ , ): """simple docstring""" lowerCAmelCase__ = {node: maxsize for node in graph.connections} lowerCAmelCase__ = {node: None for node in graph.connections} lowerCAmelCase__ = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if priority_queue.is_empty(): return dist, parent # initialization lowerCAmelCase__ = priority_queue.extract_min() lowerCAmelCase__ = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: lowerCAmelCase__ = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] ) lowerCAmelCase__ = node # running prim's algorithm while not priority_queue.is_empty(): lowerCAmelCase__ = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: lowerCAmelCase__ = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] ) lowerCAmelCase__ = node return dist, parent
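A usage sketch for the Prim routine above. The listing obfuscates the public names, so `GraphUndirectedWeighted` and `prims_algo` below are illustrative stand-ins for the graph class and the module-level function it defines:

# Illustrative only: names are stand-ins for the obfuscated ones above.
graph = GraphUndirectedWeighted()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("a", "c", 15)
dist, parent = prims_algo(graph)
# The minimum spanning tree keeps a-b (3) and b-c (10) and drops a-c (15),
# so following `parent` links from "c" reaches "a" via "b".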
702
"""simple docstring""" from math import pi, sqrt def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" if num <= 0: raise ValueError("""math domain error""" ) if num > 1_71.5: raise OverflowError("""math range error""" ) elif num - int(lowerCamelCase__ ) not in (0, 0.5): raise NotImplementedError("""num must be an integer or a half-integer""" ) elif num == 0.5: return sqrt(lowerCamelCase__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _UpperCAmelCase ( ): """simple docstring""" assert gamma(0.5 ) == sqrt(lowerCamelCase__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() __lowerCAmelCase : Dict = 1.0 while num: __lowerCAmelCase : Any = float(input("Gamma of: ")) print(F"gamma({num}) = {gamma(num)}") print("\nEnter 0 to exit...")
674
0
"""simple docstring""" from abc import ABC, abstractmethod from typing import List, Optional class a_ ( _SCREAMING_SNAKE_CASE ): def __init__( self : Optional[int] ): self.test() def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = 0 lowerCAmelCase__ = False while not completed: if counter == 1: self.reset() lowerCAmelCase__ = self.advance() if not self.does_advance(A_ ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.update(A_ ) counter += 1 if counter > 10000: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def _SCREAMING_SNAKE_CASE ( self : str ): raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : str ): raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : int ): raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def _SCREAMING_SNAKE_CASE ( self : List[str] ): raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def _SCREAMING_SNAKE_CASE ( self : Tuple ): raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) @abstractmethod def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : int=False ): raise NotImplementedError( F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" ) class a_ ( _SCREAMING_SNAKE_CASE ): def __init__( self : List[str] , snake_case__ : Optional[Any] ): super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F"""`token_ids` has to be a non-empty list, but is {token_ids}.""" ) if any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(F"""Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.""" ) lowerCAmelCase__ = token_ids lowerCAmelCase__ = len(self.token_ids ) lowerCAmelCase__ = -1 # the index of the currently fulfilled step lowerCAmelCase__ = False def _SCREAMING_SNAKE_CASE ( self : List[Any] ): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Union[str, Any] ): if not isinstance(A_ , A_ ): raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}""" ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Optional[int] ): if not isinstance(A_ , A_ ): raise ValueError(F"""`token_id` has to be an `int`, but is {token_id} of type {type(A_ )}""" ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False if self.does_advance(A_ ): self.fulfilled_idx += 1 lowerCAmelCase__ = True if self.fulfilled_idx == (self.seqlen - 1): lowerCAmelCase__ = True lowerCAmelCase__ = completed else: # failed to make progress. 
lowerCAmelCase__ = True self.reset() return stepped, completed, reset def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = False lowerCAmelCase__ = 0 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return self.seqlen - (self.fulfilled_idx + 1) def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any]=False ): lowerCAmelCase__ = PhrasalConstraint(self.token_ids ) if stateful: lowerCAmelCase__ = self.seqlen lowerCAmelCase__ = self.fulfilled_idx lowerCAmelCase__ = self.completed return new_constraint class a_ : def __init__( self : List[str] , snake_case__ : List[str] , snake_case__ : Optional[int]=True ): lowerCAmelCase__ = max([len(A_ ) for one in nested_token_ids] ) lowerCAmelCase__ = {} for token_ids in nested_token_ids: lowerCAmelCase__ = root for tidx, token_id in enumerate(A_ ): if token_id not in level: lowerCAmelCase__ = {} lowerCAmelCase__ = level[token_id] if no_subsets and self.has_subsets(A_ , A_ ): raise ValueError( """Each list in `nested_token_ids` can\'t be a complete subset of another list, but is""" F""" {nested_token_ids}.""" ) lowerCAmelCase__ = root def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Optional[Any] ): lowerCAmelCase__ = self.trie for current_token in current_seq: lowerCAmelCase__ = start[current_token] lowerCAmelCase__ = list(start.keys() ) return next_tokens def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str ): lowerCAmelCase__ = self.next_tokens(A_ ) return len(A_ ) == 0 def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : str ): lowerCAmelCase__ = list(root.values() ) if len(A_ ) == 0: return 1 else: return sum([self.count_leaves(A_ ) for nn in next_nodes] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): lowerCAmelCase__ = self.count_leaves(A_ ) return len(A_ ) != leaf_count class a_ ( _SCREAMING_SNAKE_CASE ): def __init__( self : int , snake_case__ : str ): super(A_ , self ).__init__() if not isinstance(A_ , A_ ) or len(A_ ) == 0: raise ValueError(F"""`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.""" ) if any(not isinstance(A_ , A_ ) for token_ids in nested_token_ids ): raise ValueError(F"""`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.""" ) if any( any((not isinstance(A_ , A_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F"""Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.""" ) lowerCAmelCase__ = DisjunctiveTrie(A_ ) lowerCAmelCase__ = nested_token_ids lowerCAmelCase__ = self.trie.max_height lowerCAmelCase__ = [] lowerCAmelCase__ = False def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = self.trie.next_tokens(self.current_seq ) if len(A_ ) == 0: return None else: return token_list def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int ): if not isinstance(A_ , A_ ): raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}""" ) lowerCAmelCase__ = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : int ): if not isinstance(A_ , A_ ): raise ValueError(F"""`token_id` is supposed to be type `int`, but is {token_id} of type {type(A_ )}""" ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False if self.does_advance(A_ ): self.current_seq.append(A_ ) lowerCAmelCase__ = True else: lowerCAmelCase__ = True self.reset() 
lowerCAmelCase__ = self.trie.reached_leaf(self.current_seq ) lowerCAmelCase__ = completed return stepped, completed, reset def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ = False lowerCAmelCase__ = [] def _SCREAMING_SNAKE_CASE ( self : Any ): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Dict=False ): lowerCAmelCase__ = DisjunctiveConstraint(self.token_ids ) if stateful: lowerCAmelCase__ = self.seqlen lowerCAmelCase__ = self.current_seq lowerCAmelCase__ = self.completed return new_constraint class a_ : def __init__( self : Dict , snake_case__ : Tuple ): lowerCAmelCase__ = constraints # max # of steps required to fulfill a given constraint lowerCAmelCase__ = max([c.seqlen for c in constraints] ) lowerCAmelCase__ = len(A_ ) lowerCAmelCase__ = False self.init_state() def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = [] lowerCAmelCase__ = None lowerCAmelCase__ = [constraint.copy(stateful=A_ ) for constraint in self.constraints] def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" lowerCAmelCase__ = constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) else: lowerCAmelCase__ = self.inprogress_constraint.advance() if isinstance(A_ , A_ ): token_list.append(A_ ) elif isinstance(A_ , A_ ): token_list.extend(A_ ) if len(A_ ) == 0: return None else: return token_list def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Tuple ): self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint lowerCAmelCase__ , lowerCAmelCase__ = self.add(A_ ) # the entire list of constraints are fulfilled if self.completed: break def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : str ): if not isinstance(A_ , A_ ): raise ValueError(F"""`token_id` should be an `int`, but is `{token_id}`.""" ) lowerCAmelCase__ , lowerCAmelCase__ = False, False if self.completed: lowerCAmelCase__ = True lowerCAmelCase__ = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.inprogress_constraint.update(A_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=A_ ) ) lowerCAmelCase__ = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. 
self.complete_constraints.append(self.inprogress_constraint ) lowerCAmelCase__ = None if len(self.pending_constraints ) == 0: # we're done! lowerCAmelCase__ = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(A_ ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = pending_constraint.update(A_ ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(A_ ) lowerCAmelCase__ = None if not complete and stepped: lowerCAmelCase__ = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". lowerCAmelCase__ = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. lowerCAmelCase__ = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Tuple=True ): lowerCAmelCase__ = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: lowerCAmelCase__ = [ constraint.copy(stateful=A_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: lowerCAmelCase__ = self.inprogress_constraint.copy(stateful=A_ ) lowerCAmelCase__ = [constraint.copy() for constraint in self.pending_constraints] return new_state
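For reference, the sketch below mirrors the constraint API in upstream `transformers` (`advance`, `update`, `remaining`); the listing obfuscates those method names, and the token ids are illustrative:

constraint = PhrasalConstraint([5, 9, 2])
assert constraint.advance() == 5                  # next token needed
stepped, completed, reset = constraint.update(5)  # one step of progress
assert stepped and not completed
constraint.update(9)
stepped, completed, reset = constraint.update(2)
assert completed and constraint.remaining() == 0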
703
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a_ : def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ): lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = patch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = is_training lowerCAmelCase__ = use_labels lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = scope lowerCAmelCase__ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCAmelCase__ = (image_size // patch_size) ** 2 lowerCAmelCase__ = num_patches + 2 def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[Any] ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ): lowerCAmelCase__ = TFDeiTModel(config=snake_case__ ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple 
, snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ): lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ ) lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ): lowerCAmelCase__ = self.type_sequence_label_size lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ ) lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ ) lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Optional[Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) UpperCamelCase_ : Any = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Optional[int] = False UpperCamelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = TFDeiTModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): pass def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCAmelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] 
lowerCAmelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ): lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _SCREAMING_SNAKE_CASE ( self : Any ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Any ): return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" ) # forward pass lowerCAmelCase__ = model(**snake_case__ ) # verify the logits lowerCAmelCase__ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
674
0
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers __lowerCAmelCase : List[Any] = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = os.path.dirname(os.path.realpath(_lowercase ) ) lowerCAmelCase__ = os.path.join(_lowercase , """words.txt""" ) lowerCAmelCase__ = '' with open(_lowercase ) as f: lowerCAmelCase__ = f.readline() lowerCAmelCase__ = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )] lowerCAmelCase__ = [ word for word in [sum(ord(_lowercase ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(_lowercase ) if __name__ == "__main__": print(solution())
704
"""simple docstring""" from __future__ import annotations from math import gcd def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ): """simple docstring""" if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: return (pow(lowerCamelCase__ , 2 ) + step) % modulus for _ in range(lowerCamelCase__ ): # These track the position within the cycle detection logic. lowerCAmelCase__ = seed lowerCAmelCase__ = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowerCAmelCase__ = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) __lowerCAmelCase : List[str] = parser.parse_args() __lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"{args.num} is probably prime") else: __lowerCAmelCase : List[str] = args.num // divisor print(F"{args.num} = {divisor} * {quotient}")
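A small usage sketch; because the algorithm is probabilistic, a `None` result only means no factor was found within the given attempts:

n = 101 * 103  # a small semiprime
divisor = pollard_rho(n, attempts=10)
if divisor is not None:
    assert n % divisor == 0 and divisor in (101, 103)
print(pollard_rho(13))  # a prime: expected to print None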
674
0
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class a_ ( unittest.TestCase ): UpperCamelCase_ : Optional[Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Optional[int] ): lowerCAmelCase__ = hf_hub_download( repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) lowerCAmelCase__ = VideoClassificationPipeline(model=__lowerCamelCase , image_processor=__lowerCamelCase , top_k=2 ) lowerCAmelCase__ = [ example_video_filepath, "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4", ] return video_classifier, examples def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ): for example in examples: lowerCAmelCase__ = video_classifier(__lowerCamelCase ) self.assertEqual( __lowerCamelCase , [ {"""score""": ANY(__lowerCamelCase ), """label""": ANY(__lowerCamelCase )}, {"""score""": ANY(__lowerCamelCase ), """label""": ANY(__lowerCamelCase )}, ] , ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification" lowerCAmelCase__ = VideoMAEFeatureExtractor( size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} ) lowerCAmelCase__ = pipeline( """video-classification""" , model=__lowerCamelCase , feature_extractor=__lowerCamelCase , frame_sampling_rate=4 ) lowerCAmelCase__ = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) lowerCAmelCase__ = video_classifier(__lowerCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , ) lowerCAmelCase__ = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(__lowerCamelCase , decimals=4 ) , [ [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}], [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}], ] , ) @require_tf def _SCREAMING_SNAKE_CASE ( self : Dict ): pass
705
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = TapasConfig.from_json_file(lowerCamelCase__ ) # set absolute/relative position embeddings parameter lowerCAmelCase__ = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "WTQ": # run_task_main.py hparams lowerCAmelCase__ = 4 lowerCAmelCase__ = True # hparam_utils.py hparams lowerCAmelCase__ = 0.66_46_94 lowerCAmelCase__ = 0.20_79_51 lowerCAmelCase__ = 0.12_11_94 lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = 0.0_35_25_13 lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams lowerCAmelCase__ = 4 lowerCAmelCase__ = False # hparam_utils.py hparams lowerCAmelCase__ = 36.45_19 lowerCAmelCase__ = 0.90_34_21 lowerCAmelCase__ = 2_22.0_88 lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = True lowerCAmelCase__ = 0.76_31_41 lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ ) elif task == "TABFACT": lowerCAmelCase__ = TapasForSequenceClassification(config=lowerCamelCase__ ) elif task == "MLM": lowerCAmelCase__ = TapasForMaskedLM(config=lowerCamelCase__ ) elif task == "INTERMEDIATE_PRETRAINING": lowerCAmelCase__ = TapasModel(config=lowerCamelCase__ ) else: raise ValueError(f"""Task {task} not supported.""" ) print(f"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model (weights and configuration) print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(lowerCamelCase__ ) # Save tokenizer files print(f"""Save tokenizer files to {pytorch_dump_path}""" ) lowerCAmelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(lowerCamelCase__ ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA." ) parser.add_argument( "--reset_position_index_per_cell", default=False, action="store_true", help="Whether to use relative position embeddings or not. Defaults to True.", ) parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--tapas_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained TAPAS model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." 
) __lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
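# Invocation sketch (illustrative; the script filename and every path below are
# placeholders, not files that ship with this converter):
#
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output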
674
0
"""simple docstring""" import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser __lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) torch.set_grad_enabled(False) __lowerCAmelCase : List[str] = "cuda" if torch.cuda.is_available() else "cpu" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=100 , lowerCamelCase__=" " ): """simple docstring""" lowerCAmelCase__ = text.split(lowerCamelCase_ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowerCamelCase_ ) , lowerCamelCase_ )] def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(lowerCamelCase_ ): titles.append(title if title is not None else """""" ) texts.append(lowerCamelCase_ ) return {"title": titles, "text": texts} def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=lowerCamelCase_ , padding="""longest""" , return_tensors="""pt""" )['input_ids'] lowerCAmelCase__ = ctx_encoder(input_ids.to(device=lowerCamelCase_ ) , return_dict=lowerCamelCase_ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ): """simple docstring""" logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowerCAmelCase__ = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowerCAmelCase__ = dataset.map(lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=processing_args.num_proc ) # And compute the embeddings lowerCAmelCase__ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=lowerCamelCase_ ) lowerCAmelCase__ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) lowerCAmelCase__ = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space lowerCAmelCase__ = dataset.map( partial(lowerCamelCase_ , ctx_encoder=lowerCamelCase_ , ctx_tokenizer=lowerCamelCase_ ) , batched=lowerCamelCase_ , batch_size=processing_args.batch_size , features=lowerCamelCase_ , ) # And finally save your dataset lowerCAmelCase__ = os.path.join(rag_example_args.output_dir , 
"""my_knowledge_dataset""" ) dataset.save_to_disk(lowerCamelCase_ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowerCAmelCase__ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=lowerCamelCase_ ) # And save the index lowerCAmelCase__ = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(lowerCamelCase_ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class a_ : UpperCamelCase_ : Any = field( default=str(Path(__lowerCamelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns \'title\' and \'text\'"} , ) UpperCamelCase_ : Optional[int] = field( default=__lowerCamelCase , metadata={"help": "Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'."} , ) UpperCamelCase_ : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\'"} , ) UpperCamelCase_ : Tuple = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or" " \'facebook/dpr-ctx_encoder-multiset-base\'" ) } , ) UpperCamelCase_ : Optional[Any] = field( default=str(Path(__lowerCamelCase ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class a_ : UpperCamelCase_ : List[str] = field( default=__lowerCamelCase , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) UpperCamelCase_ : List[Any] = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class a_ : UpperCamelCase_ : List[Any] = field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) UpperCamelCase_ : Optional[Any] = field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) __lowerCAmelCase : List[str] = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: __lowerCAmelCase : Dict = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
706
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ = 50 ): """simple docstring""" lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"{solution() = }")
674
0
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class a_ ( tf.keras.layers.Layer ): def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : Dict , snake_case__ : str = None , snake_case__ : Dict = None ): super().__init__() lowerCAmelCase__ = pad_token_id lowerCAmelCase__ = max_length lowerCAmelCase__ = vocab lowerCAmelCase__ = merges lowerCAmelCase__ = BytePairTokenizer(_lowercase , _lowercase , sequence_length=_lowercase ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Any , snake_case__ : str , *snake_case__ : Tuple , **snake_case__ : Optional[Any] ): lowerCAmelCase__ = [' '.join(_lowercase ) for m in tokenizer.bpe_ranks.keys()] lowerCAmelCase__ = tokenizer.get_vocab() return cls(_lowercase , _lowercase , *_lowercase , **_lowercase ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] , snake_case__ : Any , *snake_case__ : Tuple , **snake_case__ : int ): lowerCAmelCase__ = GPTaTokenizer.from_pretrained(_lowercase , *_lowercase , **_lowercase ) return cls.from_tokenizer(_lowercase , *_lowercase , **_lowercase ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[int] , snake_case__ : List[Any] ): return cls(**_lowercase ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : int , snake_case__ : List[str] = None ): lowerCAmelCase__ = self.tf_tokenizer(_lowercase ) lowerCAmelCase__ = tf.ones_like(_lowercase ) if self.pad_token_id is not None: # pad the tokens up to max length lowerCAmelCase__ = max_length if max_length is not None else self.max_length if max_length is not None: lowerCAmelCase__ = pad_model_inputs( _lowercase , max_seq_length=_lowercase , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
707
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("0.8.3"): raise Exception("requires gluonnlp == 0.8.3") if version.parse(mx.__version__) != version.parse("1.5.0"): raise Exception("requires mxnet == 1.5.0") logging.set_verbosity_info() __lowerCAmelCase : Any = logging.get_logger(__name__) __lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 1024, """hidden_size""": 768, """max_length""": 512, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 1024, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1e-5, """token_type_vocab_size""": 2, } lowerCAmelCase__ = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowerCAmelCase__ = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=lowerCamelCase__ , output_all_encodings=lowerCamelCase__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" ) lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ ) lowerCAmelCase__ = nlp.model.BERTModel( lowerCamelCase__ , len(lowerCamelCase__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=lowerCamelCase__ , use_token_type_embed=lowerCamelCase__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=lowerCamelCase__ , use_decoder=lowerCamelCase__ , ) original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ ) lowerCAmelCase__ = original_bort._collect_params_with_prefix() # Build our config 🤗 lowerCAmelCase__ = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": 
predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.02, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(lowerCamelCase__ ), } lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ ) lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase__ ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ): lowerCAmelCase__ = hf_param.shape lowerCAmelCase__ = to_torch(params[gluon_param] ) lowerCAmelCase__ = gluon_param.shape assert ( shape_hf == shape_gluon ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers""" return gluon_param lowerCAmelCase__ = check_and_map_params( 
hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowerCAmelCase__ = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i] # self attention lowerCAmelCase__ = layer.attention.self lowerCAmelCase__ = check_and_map_params( self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" ) lowerCAmelCase__ = check_and_map_params( self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" ) # self attention output lowerCAmelCase__ = layer.attention.output lowerCAmelCase__ = check_and_map_params( self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" ) lowerCAmelCase__ = check_and_map_params( self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" ) # intermediate lowerCAmelCase__ = layer.intermediate lowerCAmelCase__ = check_and_map_params( intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" ) lowerCAmelCase__ = check_and_map_params( intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" ) # output lowerCAmelCase__ = layer.output lowerCAmelCase__ = check_and_map_params( bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" ) lowerCAmelCase__ = check_and_map_params( bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" ) lowerCAmelCase__ = check_and_map_params( bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" ) lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""] # Get gluon output lowerCAmelCase__ = mx.nd.array([input_ids] ) lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase__ ) lowerCAmelCase__ = 
BertModel.from_pretrained(lowerCamelCase__ ) hf_bort_model.eval() lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" ) lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0] lowerCAmelCase__ = output_gluon[0].asnumpy() lowerCAmelCase__ = output_hf[0].detach().numpy() lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item() lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) if success: print("""✔️ Both model do output the same tensors""" ) else: print("""❌ Both model do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : str = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
674
0
"""simple docstring""" from __future__ import annotations import math class a_ : def __init__( self : List[str] , snake_case__ : int ): lowerCAmelCase__ = size # approximate the overall size of segment tree with given value lowerCAmelCase__ = [0 for i in range(0 , 4 * size )] # create array to store lazy update lowerCAmelCase__ = [0 for i in range(0 , 4 * size )] lowerCAmelCase__ = [0 for i in range(0 , 4 * size )] # flag for lazy update def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : int ): return idx * 2 def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int ): return idx * 2 + 1 def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : list[int] ): if left_element == right_element: lowerCAmelCase__ = a[left_element - 1] else: lowerCAmelCase__ = (left_element + right_element) // 2 self.build(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ ) self.build(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ ) lowerCAmelCase__ = max( self.segment_tree[self.left(snake_case__ )] , self.segment_tree[self.right(snake_case__ )] ) def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int ): if self.flag[idx] is True: lowerCAmelCase__ = self.lazy[idx] lowerCAmelCase__ = False if left_element != right_element: lowerCAmelCase__ = self.lazy[idx] lowerCAmelCase__ = self.lazy[idx] lowerCAmelCase__ = True lowerCAmelCase__ = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: lowerCAmelCase__ = val if left_element != right_element: lowerCAmelCase__ = val lowerCAmelCase__ = val lowerCAmelCase__ = True lowerCAmelCase__ = True return True lowerCAmelCase__ = (left_element + right_element) // 2 self.update(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.update(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase__ = max( self.segment_tree[self.left(snake_case__ )] , self.segment_tree[self.right(snake_case__ )] ) return True def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int ): if self.flag[idx] is True: lowerCAmelCase__ = self.lazy[idx] lowerCAmelCase__ = False if left_element != right_element: lowerCAmelCase__ = self.lazy[idx] lowerCAmelCase__ = self.lazy[idx] lowerCAmelCase__ = True lowerCAmelCase__ = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] lowerCAmelCase__ = (left_element + right_element) // 2 lowerCAmelCase__ = self.query(self.left(snake_case__ ) , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) lowerCAmelCase__ = self.query(self.right(snake_case__ ) , mid + 1 , snake_case__ , snake_case__ , snake_case__ ) return max(snake_case__ , snake_case__ ) def __str__( self : Tuple ): return str([self.query(1 , 1 , self.size , snake_case__ , snake_case__ ) for i in range(1 , self.size + 1 )] ) if __name__ == "__main__": __lowerCAmelCase : str = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] __lowerCAmelCase : List[Any] = 15 __lowerCAmelCase : Union[str, Any] = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) 
segt.update(1, 1, size, 1, 3, 1_11) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 2_35) print(segt)
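# Cross-check sketch (illustrative): the class above is a max segment tree with
# lazy range assignment; a naive reference with descriptive names computes the
# value the tree's `update`/`query` pair should reproduce.
if __name__ == "__main__":
    def naive_range_assign_max(values: list[int], a: int, b: int, val: int, l: int, r: int) -> int:
        # assign `val` on 1-based positions [a, b], then take the max over [l, r]
        vals = list(values)
        for i in range(a - 1, b):
            vals[i] = val
        return max(vals[l - 1 : r])

    data = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    print(naive_range_assign_max(data, 1, 3, 111, 1, 15))  # expected to match the tree's answer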
708
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class a_ : def __init__( self : Optional[int] ): lowerCAmelCase__ = """""" lowerCAmelCase__ = """""" lowerCAmelCase__ = [] lowerCAmelCase__ = 0 lowerCAmelCase__ = 256 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ): lowerCAmelCase__ = cva.imread(snake_case__ , 0 ) lowerCAmelCase__ = copy.deepcopy(self.img ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" ) lowerCAmelCase__ = np.sum(snake_case__ ) for i in range(len(snake_case__ ) ): lowerCAmelCase__ = x[i] / self.k self.sk += prk lowerCAmelCase__ = (self.L - 1) * self.sk if self.rem != 0: lowerCAmelCase__ = int(last % last ) lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(snake_case__ ) lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size ) lowerCAmelCase__ = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowerCAmelCase__ = self.img[j][i] if num != self.last_list[num]: lowerCAmelCase__ = self.last_list[num] cva.imwrite("""output_data/output.jpg""" , self.img ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): plt.hist(self.img.ravel() , 256 , [0, 256] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): cva.imshow("""Output-Image""" , self.img ) cva.imshow("""Input-Image""" , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": __lowerCAmelCase : Dict = os.path.join(os.path.basename(__file__), "image_data/input.jpg") __lowerCAmelCase : Optional[int] = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
674
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : Any = { 'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'], 'tokenization_luke': ['LukeTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST', 'LukeForEntityClassification', 'LukeForEntityPairClassification', 'LukeForEntitySpanClassification', 'LukeForMultipleChoice', 'LukeForQuestionAnswering', 'LukeForSequenceClassification', 'LukeForTokenClassification', 'LukeForMaskedLM', 'LukeModel', 'LukePreTrainedModel', ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys __lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
709
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class a_ ( __UpperCamelCase ): UpperCamelCase_ : List[str] = ( "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image." "It takes two arguments named `image` which should be the original image, and `label` which should be a text " "describing the elements what should be identified in the segmentation mask. The tool returns the mask." ) UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined" UpperCamelCase_ : Any = "image_segmenter" UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation UpperCamelCase_ : List[str] = ["image", "text"] UpperCamelCase_ : int = ["image"] def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ): requires_backends(self , ["""vision"""] ) super().__init__(*snake_case__ , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ): return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" ) def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ): with torch.no_grad(): lowerCAmelCase__ = self.model(**snake_case__ ).logits return logits def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ): lowerCAmelCase__ = outputs.cpu().detach().numpy() lowerCAmelCase__ = 0 lowerCAmelCase__ = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
674
0
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return number | (1 << position) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return number & ~(1 << position) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return number ^ (1 << position) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return ((number >> position) & 1) == 1 def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
710
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = LayoutLMTokenizer UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast UpperCamelCase_ : Dict = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() lowerCAmelCase__ = [ """[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ): lowerCAmelCase__ = """UNwant\u00E9d,running""" lowerCAmelCase__ = """unwanted, running""" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ): pass
674
0
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel __lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if os.path.exists(_lowerCAmelCase ): if os.path.exists(os.path.join(_lowerCAmelCase , """config.json""" ) ) and os.path.isfile( os.path.join(_lowerCAmelCase , """config.json""" ) ): os.remove(os.path.join(_lowerCAmelCase , """config.json""" ) ) if os.path.exists(os.path.join(_lowerCAmelCase , """pytorch_model.bin""" ) ) and os.path.isfile( os.path.join(_lowerCAmelCase , """pytorch_model.bin""" ) ): os.remove(os.path.join(_lowerCAmelCase , """pytorch_model.bin""" ) ) else: os.makedirs(_lowerCAmelCase ) model.save_pretrained(_lowerCAmelCase ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=False ): """simple docstring""" lowerCAmelCase__ = 2 if unlogit: lowerCAmelCase__ = torch.pow(_lowerCAmelCase , _lowerCAmelCase ) lowerCAmelCase__ = p * torch.log(_lowerCAmelCase ) lowerCAmelCase__ = 0 return -plogp.sum(dim=-1 ) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" logger.info("""lv, h >\t""" + """\t""".join(f"""{x + 1}""" for x in range(len(_lowerCAmelCase ) ) ) ) for row in range(len(_lowerCAmelCase ) ): if tensor.dtype != torch.long: logger.info(f"""layer {row + 1}:\t""" + """\t""".join(f"""{x:.5f}""" for x in tensor[row].cpu().data ) ) else: logger.info(f"""layer {row + 1}:\t""" + """\t""".join(f"""{x:d}""" for x in tensor[row].cpu().data ) ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=False ): """simple docstring""" lowerCAmelCase__ = model.config.num_hidden_layers, model.config.num_attention_heads lowerCAmelCase__ = torch.zeros(_lowerCAmelCase , _lowerCAmelCase ).to(args.device ) lowerCAmelCase__ = torch.zeros(_lowerCAmelCase , _lowerCAmelCase ).to(args.device ) if head_mask is None: lowerCAmelCase__ = torch.ones(_lowerCAmelCase , _lowerCAmelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=_lowerCAmelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: lowerCAmelCase__ = None lowerCAmelCase__ = 0.0 lowerCAmelCase__ = 0.0 for step, inputs in enumerate(tqdm(_lowerCAmelCase , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ): lowerCAmelCase__ = tuple(t.to(args.device ) for t in inputs ) (lowerCAmelCase__ ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) lowerCAmelCase__ = model(_lowerCAmelCase , labels=_lowerCAmelCase , head_mask=_lowerCAmelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) lowerCAmelCase__ = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(_lowerCAmelCase ): lowerCAmelCase__ = entropy(attn.detach() , _lowerCAmelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_lowerCAmelCase 
).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: lowerCAmelCase__ = 2 lowerCAmelCase__ = torch.pow(torch.pow(_lowerCAmelCase , _lowerCAmelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: lowerCAmelCase__ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info("""Attention entropies""" ) print_ad_tensor(_lowerCAmelCase ) if compute_importance: logger.info("""Head importance scores""" ) print_ad_tensor(_lowerCAmelCase ) logger.info("""Head ranked by importance scores""" ) lowerCAmelCase__ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) lowerCAmelCase__ = torch.arange( head_importance.numel() , device=args.device ) lowerCAmelCase__ = head_ranks.view_as(_lowerCAmelCase ) print_ad_tensor(_lowerCAmelCase ) return attn_entropy, head_importance, total_loss def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = compute_heads_importance(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase ) lowerCAmelCase__ = 1 / loss # instead of downsteam score use the LM loss logger.info("""Pruning: original score: %f, threshold: %f""" , _lowerCAmelCase , original_score * args.masking_threshold ) lowerCAmelCase__ = torch.ones_like(_lowerCAmelCase ) lowerCAmelCase__ = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) lowerCAmelCase__ = original_score while current_score >= original_score * args.masking_threshold: lowerCAmelCase__ = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads lowerCAmelCase__ = float("""Inf""" ) lowerCAmelCase__ = head_importance.view(-1 ).sort()[1] if len(_lowerCAmelCase ) <= num_to_mask: print("""BREAK BY num_to_mask""" ) break # mask heads lowerCAmelCase__ = current_heads_to_mask[:num_to_mask] logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) ) lowerCAmelCase__ = new_head_mask.view(-1 ) lowerCAmelCase__ = 0.0 lowerCAmelCase__ = new_head_mask.view_as(_lowerCAmelCase ) lowerCAmelCase__ = new_head_mask.clone().detach() print_ad_tensor(_lowerCAmelCase ) # Compute metric and head importance again lowerCAmelCase__ = compute_heads_importance( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase , head_mask=_lowerCAmelCase ) lowerCAmelCase__ = 1 / loss logger.info( """Masking: current score: %f, remaining heads %d (%.1f percents)""" , _lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info("""Final head mask""" ) print_ad_tensor(_lowerCAmelCase ) np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() ) return head_mask def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = datetime.now() lowerCAmelCase__ = compute_heads_importance( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase , compute_importance=_lowerCAmelCase , head_mask=_lowerCAmelCase ) lowerCAmelCase__ = 1 / loss lowerCAmelCase__ = datetime.now() - before_time lowerCAmelCase__ = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ = { layer: (1 - 
head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_lowerCAmelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): lowerCAmelCase__ = [ v, ] assert sum(len(_lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_lowerCAmelCase ) lowerCAmelCase__ = sum(p.numel() for p in model.parameters() ) lowerCAmelCase__ = datetime.now() lowerCAmelCase__ = compute_heads_importance( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , compute_entropy=_lowerCAmelCase , compute_importance=_lowerCAmelCase , head_mask=_lowerCAmelCase , actually_pruned=_lowerCAmelCase , ) lowerCAmelCase__ = 1 / loss lowerCAmelCase__ = datetime.now() - before_time logger.info( """Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , _lowerCAmelCase , _lowerCAmelCase , pruned_num_params / original_num_params * 100 , ) logger.info("""Pruning: score with masking: %f score with pruning: %f""" , _lowerCAmelCase , _lowerCAmelCase ) logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100 ) save_model(_lowerCAmelCase , args.output_dir ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--data_dir""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , ) parser.add_argument( """--model_name_or_path""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--output_dir""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""The output directory where the model predictions and checkpoints will be written.""" , ) # Other parameters parser.add_argument( """--config_name""" , default="""""" , type=_lowerCAmelCase , help="""Pretrained config name or path if not the same as model_name_or_path""" , ) parser.add_argument( """--tokenizer_name""" , default="""""" , type=_lowerCAmelCase , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , ) parser.add_argument( """--cache_dir""" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="""Where do you want to store the pre-trained models downloaded from s3""" , ) parser.add_argument( """--data_subset""" , type=_lowerCAmelCase , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" ) parser.add_argument( """--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) parser.add_argument( """--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""" ) parser.add_argument( """--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , ) parser.add_argument( """--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" ) parser.add_argument( """--masking_threshold""" , default=0.9 , type=_lowerCAmelCase , help="""masking threshold in term of metrics (stop masking when metric < threshold 
* original metric value).""" , ) parser.add_argument( """--masking_amount""" , default=0.1 , type=_lowerCAmelCase , help="""Amount to heads to masking at each masking step.""" ) parser.add_argument("""--metric_name""" , default="""acc""" , type=_lowerCAmelCase , help="""Metric to use for head masking.""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=_lowerCAmelCase , help=( """The maximum total input sequence length after WordPiece tokenization. \n""" """Sequences longer than this will be truncated, sequences shorter padded.""" ) , ) parser.add_argument("""--batch_size""" , default=1 , type=_lowerCAmelCase , help="""Batch size.""" ) parser.add_argument("""--seed""" , type=_lowerCAmelCase , default=42 ) parser.add_argument("""--local_rank""" , type=_lowerCAmelCase , default=-1 , help="""local_rank for distributed training on gpus""" ) parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" ) parser.add_argument("""--server_ip""" , type=_lowerCAmelCase , default="""""" , help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" , type=_lowerCAmelCase , default="""""" , help="""Can be used for distant debugging.""" ) lowerCAmelCase__ = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowerCAmelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: lowerCAmelCase__ = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" ) lowerCAmelCase__ = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) lowerCAmelCase__ = torch.device("""cuda""" , args.local_rank ) lowerCAmelCase__ = 1 torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) lowerCAmelCase__ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: lowerCAmelCase__ = nn.parallel.DistributedDataParallel( _lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_lowerCAmelCase ) elif args.n_gpu > 1: lowerCAmelCase__ = nn.DataParallel(_lowerCAmelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_lowerCAmelCase ) torch.save(_lowerCAmelCase , os.path.join(args.output_dir , """run_args.bin""" ) ) logger.info("""Training/evaluation parameters %s""" , _lowerCAmelCase ) # Prepare dataset lowerCAmelCase__ = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) lowerCAmelCase__ = (torch.from_numpy(_lowerCAmelCase ),) lowerCAmelCase__ = TensorDataset(*_lowerCAmelCase ) lowerCAmelCase__ = RandomSampler(_lowerCAmelCase ) lowerCAmelCase__ = DataLoader(_lowerCAmelCase , sampler=_lowerCAmelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head 
pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: lowerCAmelCase__ = mask_heads(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) prune_heads(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
711
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 __lowerCAmelCase : Any = { # 1536-bit 5: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 2048-bit 14: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 3072-bit 15: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 4096-bit 16: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 6144-bit 17: { "prime": int( 
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, # 8192-bit 18: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + 
"3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), "generator": 2, }, } class a_ : def __init__( self : List[str] , snake_case__ : int = 14 ): if group not in primes: raise ValueError("""Unsupported Group""" ) lowerCAmelCase__ = primes[group]["""prime"""] lowerCAmelCase__ = primes[group]["""generator"""] lowerCAmelCase__ = int(hexlify(urandom(32 ) ) , base=16 ) def _SCREAMING_SNAKE_CASE ( self : Any ): return hex(self.__private_key )[2:] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = pow(self.generator , self.__private_key , self.prime ) return hex(snake_case__ )[2:] def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str ): lowerCAmelCase__ = int(snake_case__ , base=16 ) if not self.is_valid_public_key(snake_case__ ): raise ValueError("""Invalid public key""" ) lowerCAmelCase__ = pow(snake_case__ , self.__private_key , self.prime ) return shaaaa(str(snake_case__ ).encode() ).hexdigest() @staticmethod def _SCREAMING_SNAKE_CASE ( snake_case__ : int , snake_case__ : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1 ) @staticmethod def _SCREAMING_SNAKE_CASE ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ): lowerCAmelCase__ = int(snake_case__ , base=16 ) lowerCAmelCase__ = int(snake_case__ , base=16 ) lowerCAmelCase__ = primes[group]["""prime"""] if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ): raise ValueError("""Invalid public key""" ) lowerCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ ) return shaaaa(str(snake_case__ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
674
0
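The record above pairs its sample with a Diffie-Hellman helper class whose identifiers have been scrambled by the dataset's renaming pass. As a readability aid, here is a minimal standalone sketch of the exchange that class encodes, using a deliberately tiny toy prime rather than the RFC 3526 MODP groups in the `primes` table (the class's `shaaaa` call appears to be an obfuscated `sha256`):

from hashlib import sha256

# Toy parameters for illustration only -- far too small to be secure.
prime, generator = 23, 5
alice_private, bob_private = 6, 15

alice_public = pow(generator, alice_private, prime)  # 5**6 % 23 == 8
bob_public = pow(generator, bob_private, prime)      # 5**15 % 23 == 19

# Each side raises the other's public value to its own private exponent.
shared_alice = pow(bob_public, alice_private, prime)
shared_bob = pow(alice_public, bob_private, prime)
assert shared_alice == shared_bob == 2

# The class above additionally hashes the shared value before returning it.
shared_key = sha256(str(shared_alice).encode()).hexdigest()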
"""simple docstring""" import argparse __lowerCAmelCase : Any = "docs/source/_static/js/custom.js" def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" with open(__lowerCAmelCase , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = 0 # First let's put the right version while not lines[index].startswith("""const stableVersion =""" ): index += 1 lowerCAmelCase__ = f"""const stableVersion = \"v{version}\"\n""" # Then update the dictionary while not lines[index].startswith("""const versionMapping = {""" ): index += 1 # We go until the end while not lines[index].startswith("""}""" ): index += 1 # We add the new version at the end lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n""" with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__lowerCAmelCase ) if __name__ == "__main__": __lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("--version", help="Release version.") __lowerCAmelCase : Optional[int] = parser.parse_args() update_custom_js(args.version)
712
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ): """simple docstring""" assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match""" lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match""" lowerCAmelCase__ = nn.Parameter(lowerCamelCase__ ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = np.asarray(weights[0] ) lowerCAmelCase__ = np.asarray(weights[1] ) lowerCAmelCase__ = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = np.asarray(weights[0] ) lowerCAmelCase__ = np.asarray(weights[1] ) lowerCAmelCase__ = np.asarray(weights[2] ) lowerCAmelCase__ = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase__ ).view(-1 , lowerCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = weights[0][0][0] lowerCAmelCase__ = np.asarray(layer_norm_a[0] ) lowerCAmelCase__ = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # lsh weights + output lowerCAmelCase__ = weights[0][1] if len(lowerCamelCase__ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ ) else: set_layer_weights_in_torch_local(lowerCamelCase__ , torch_block.attention , lowerCamelCase__ ) # intermediate weighs lowerCAmelCase__ = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase__ ) == 4: lowerCAmelCase__ = intermediate_weights[2] # layernorm 2 lowerCAmelCase__ = np.asarray(intermediate_weights[0][0] ) lowerCAmelCase__ = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # intermediate dense lowerCAmelCase__ = np.asarray(intermediate_weights[1][0] ) lowerCAmelCase__ = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) # intermediate out lowerCAmelCase__ = np.asarray(intermediate_weights[4][0] 
) lowerCAmelCase__ = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = torch_model.reformer # word embeds lowerCAmelCase__ = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase__ ) , ) if isinstance(weights[3] , lowerCamelCase__ ): lowerCAmelCase__ = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCAmelCase__ = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f"""{position_embeddings[emb_idx]} emb does not match""" lowerCAmelCase__ = nn.Parameter(torch.tensor(lowerCamelCase__ ) ) lowerCAmelCase__ = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCAmelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # output layer norm lowerCAmelCase__ = np.asarray(weights[7][0] ) lowerCAmelCase__ = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase__ ) , torch.tensor(lowerCamelCase__ ) , ) # output embeddings lowerCAmelCase__ = np.asarray(weights[9][0] ) lowerCAmelCase__ = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase__ ) , ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = ReformerConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ = ReformerModelWithLMHead(lowerCamelCase__ ) with open(lowerCamelCase__ , """rb""" ) as f: lowerCAmelCase__ = pickle.load(lowerCamelCase__ )["""weights"""] set_model_weights_in_torch(lowerCamelCase__ , lowerCamelCase__ , config.hidden_size ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained Reformer model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
674
0
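The first sample in the record above edits `custom.js` by scanning for two markers and splicing in a new version string. A short in-memory sketch of that edit, assuming the obfuscated locals are restored to `lines`, `index`, and `version`:

# Stand-in for the contents of docs/source/_static/js/custom.js.
lines = [
    'const stableVersion = "v4.27.0"\n',
    'const versionMapping = {\n',
    '    "v4.27.0": "v4.27.0",\n',
    '}\n',
]
version = "4.28.0"

index = 0
while not lines[index].startswith("const stableVersion ="):
    index += 1
lines[index] = f'const stableVersion = "v{version}"\n'    # bump the stable version
while not lines[index].startswith("const versionMapping = {"):
    index += 1
while not lines[index].startswith("}"):
    index += 1
lines[index - 1] += f'    "v{version}": "v{version}",\n'  # append to the mapping
assert lines[0] == 'const stableVersion = "v4.28.0"\n'
assert '"v4.28.0": "v4.28.0"' in lines[2]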
"""simple docstring""" from __future__ import annotations def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = str(__UpperCAmelCase ) return len(__UpperCAmelCase ) == 9 and set(__UpperCAmelCase ) == set("""123456789""" ) def _UpperCAmelCase ( ): """simple docstring""" for base_num in range(9999 , 4999 , -1 ): lowerCAmelCase__ = 10_0002 * base_num if is_9_pandigital(__UpperCAmelCase ): return candidate for base_num in range(333 , 99 , -1 ): lowerCAmelCase__ = 100_2003 * base_num if is_9_pandigital(__UpperCAmelCase ): return candidate return None if __name__ == "__main__": print(F"{solution() = }")
713
"""simple docstring""" import os from math import logaa def _UpperCAmelCase ( lowerCamelCase__ = "base_exp.txt" ): """simple docstring""" lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(lowerCamelCase__ ) , lowerCamelCase__ ) ) ): lowerCAmelCase__ , lowerCAmelCase__ = list(map(lowerCamelCase__ , line.split(""",""" ) ) ) if x * logaa(lowerCamelCase__ ) > largest: lowerCAmelCase__ = x * logaa(lowerCamelCase__ ) lowerCAmelCase__ = i + 1 return result if __name__ == "__main__": print(solution())
674
0
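The pandigital search above multiplies a base by 100002 (or 1002003) to emulate concatenating the products n*1 and n*2 (and n*3). A compact sketch of the digit check it relies on:

def is_9_pandigital(number: int) -> bool:
    # True when the decimal digits are exactly 1-9, each used once.
    digits = str(number)
    return len(digits) == 9 and set(digits) == set("123456789")

# 192 concatenated with 192*2 and 192*3 gives 192384576 -- pandigital.
assert is_9_pandigital(192384576)
# 100002 * 9327 == 932718654, the form the first search loop generates.
assert is_9_pandigital(100002 * 9327)
assert not is_9_pandigital(123456780)  # contains a 0 and misses 9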
"""simple docstring""" import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def _UpperCAmelCase ( lowerCamelCase__="" ): """simple docstring""" lowerCAmelCase__ = tempfile.mkdtemp() return os.path.join(lowerCAmelCase__ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = torch.rand(12 , dtype=torch.floataa ) - 0.5 lowerCAmelCase__ = AgentAudio(_UpperCAmelCase ) lowerCAmelCase__ = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(_UpperCAmelCase , agent_type.to_raw() , atol=1E-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(_UpperCAmelCase ) ) # Ensure that the file contains the same value as the original tensor lowerCAmelCase__ , lowerCAmelCase__ = sf.read(_UpperCAmelCase ) self.assertTrue(torch.allclose(_UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , atol=1E-4 ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = torch.rand(12 , dtype=torch.floataa ) - 0.5 lowerCAmelCase__ = get_new_path(suffix=""".wav""" ) sf.write(_UpperCAmelCase , _UpperCAmelCase , 16000 ) lowerCAmelCase__ = AgentAudio(_UpperCAmelCase ) self.assertTrue(torch.allclose(_UpperCAmelCase , agent_type.to_raw() , atol=1E-4 ) ) self.assertEqual(agent_type.to_string() , _UpperCAmelCase ) @require_vision @require_torch class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = torch.randint(0 , 256 , (64, 64, 3) ) lowerCAmelCase__ = AgentImage(_UpperCAmelCase ) lowerCAmelCase__ = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(_UpperCAmelCase , agent_type._tensor , atol=1E-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" lowerCAmelCase__ = Image.open(_UpperCAmelCase ) lowerCAmelCase__ = AgentImage(_UpperCAmelCase ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(_UpperCAmelCase ) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" lowerCAmelCase__ = Image.open(_UpperCAmelCase ) lowerCAmelCase__ = AgentImage(_UpperCAmelCase ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(_UpperCAmelCase ) ) class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = """Hey!""" lowerCAmelCase__ = 
AgentText(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , agent_type.to_string() ) self.assertEqual(_UpperCAmelCase , agent_type.to_raw() ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
714
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" while b: lowerCAmelCase__ , lowerCAmelCase__ = b, a % b return a def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return a if b == 0 else euclidean_gcd_recursive(lowerCamelCase__ , a % b ) def _UpperCAmelCase ( ): """simple docstring""" print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" ) print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" ) print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" ) print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" ) print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" ) print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" ) print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" ) print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" ) if __name__ == "__main__": main()
674
0
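The style-context sample above implements Euclid's algorithm twice, iteratively and recursively, but the renaming pass has detached its local names. A readable equivalent sketch:

def gcd_iterative(a: int, b: int) -> int:
    # Replace (a, b) with (b, a mod b) until the remainder vanishes.
    while b:
        a, b = b, a % b
    return a

def gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else gcd_recursive(b, a % b)

assert gcd_iterative(48, 18) == gcd_recursive(48, 18) == 6
assert gcd_iterative(7, 5) == 1  # coprime inputs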
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase : List[str] = { "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig", "WhisperOnnxConfig"], "feature_extraction_whisper": ["WhisperFeatureExtractor"], "processing_whisper": ["WhisperProcessor"], "tokenization_whisper": ["WhisperTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = ["WhisperTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Any = [ "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "WhisperForConditionalGeneration", "WhisperModel", "WhisperPreTrainedModel", "WhisperForAudioClassification", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] = [ "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFWhisperForConditionalGeneration", "TFWhisperModel", "TFWhisperPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ "FlaxWhisperForConditionalGeneration", "FlaxWhisperModel", "FlaxWhisperPreTrainedModel", "FlaxWhisperForAudioClassification", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys __lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
715
"""simple docstring""" import os def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = os.path.dirname(os.path.realpath(lowerCamelCase__ ) ) lowerCAmelCase__ = os.path.join(lowerCamelCase__ , """triangle.txt""" ) with open(lowerCamelCase__ ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = [] for line in triangle: lowerCAmelCase__ = [] for number in line.strip().split(""" """ ): numbers_from_line.append(int(lowerCamelCase__ ) ) a.append(lowerCamelCase__ ) for i in range(1 , len(lowerCamelCase__ ) ): for j in range(len(a[i] ) ): lowerCAmelCase__ = a[i - 1][j] if j != len(a[i - 1] ) else 0 lowerCAmelCase__ = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(lowerCamelCase__ , lowerCamelCase__ ) return max(a[-1] ) if __name__ == "__main__": print(solution())
674
0
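The triangle solver above accumulates the best path sum row by row. The same top-down dynamic program on a small hard-coded triangle (the classic 4-row example, where the best path is 3 + 7 + 4 + 9):

triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]
for i in range(1, len(triangle)):
    for j in range(len(triangle[i])):
        # Each cell absorbs the better of its two parents (0 off the edges).
        above_right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
        above_left = triangle[i - 1][j - 1] if j > 0 else 0
        triangle[i][j] += max(above_left, above_right)
assert max(triangle[-1]) == 23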
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowerCAmelCase : Optional[int] = logging.get_logger(__name__) __lowerCAmelCase : Dict = { "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json", } class a_ ( a__ , a__ ): UpperCamelCase_ : str = "resnet" UpperCamelCase_ : str = ["basic", "bottleneck"] def __init__( self : int , snake_case__ : Union[str, Any]=3 , snake_case__ : Optional[Any]=64 , snake_case__ : int=[256, 512, 1024, 2048] , snake_case__ : List[Any]=[3, 4, 6, 3] , snake_case__ : Any="bottleneck" , snake_case__ : Optional[int]="relu" , snake_case__ : Any=False , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]=None , **snake_case__ : Any , ): super().__init__(**_A ) if layer_type not in self.layer_types: raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" ) lowerCAmelCase__ = num_channels lowerCAmelCase__ = embedding_size lowerCAmelCase__ = hidden_sizes lowerCAmelCase__ = depths lowerCAmelCase__ = layer_type lowerCAmelCase__ = hidden_act lowerCAmelCase__ = downsample_in_first_stage lowerCAmelCase__ = ['stem'] + [F"""stage{idx}""" for idx in range(1 , len(_A ) + 1 )] lowerCAmelCase__ = get_aligned_output_features_output_indices( out_features=_A , out_indices=_A , stage_names=self.stage_names ) class a_ ( a__ ): UpperCamelCase_ : Tuple = version.parse("1.11" ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): return 1E-3
716
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu __lowerCAmelCase : Union[str, Any] = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: __lowerCAmelCase : Optional[int] = json.load(f) @require_torch class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Dict ): return FSMTTokenizer.from_pretrained(snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ): lowerCAmelCase__ = FSMTForConditionalGeneration.from_pretrained(snake_case__ ).to(snake_case__ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Any , snake_case__ : int ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality lowerCAmelCase__ = F"""facebook/wmt19-{pair}""" lowerCAmelCase__ = self.get_tokenizer(snake_case__ ) lowerCAmelCase__ = self.get_model(snake_case__ ) lowerCAmelCase__ = bleu_data[pair]["""src"""] lowerCAmelCase__ = bleu_data[pair]["""tgt"""] lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""pt""" , truncation=snake_case__ , padding="""longest""" ).to(snake_case__ ) lowerCAmelCase__ = model.generate( input_ids=batch.input_ids , num_beams=8 , ) lowerCAmelCase__ = tokenizer.batch_decode( snake_case__ , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ ) lowerCAmelCase__ = calculate_bleu(snake_case__ , snake_case__ ) print(snake_case__ ) self.assertGreaterEqual(scores["""bleu"""] , snake_case__ )
674
0
"""simple docstring""" from __future__ import annotations def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): # noqa: E741 """simple docstring""" while r - l > 1: lowerCAmelCase__ = (l + r) // 2 if v[m] >= key: lowerCAmelCase__ = m else: lowerCAmelCase__ = m # noqa: E741 return r def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" if len(_lowerCamelCase ) == 0: return 0 lowerCAmelCase__ = [0] * len(_lowerCamelCase ) lowerCAmelCase__ = 1 lowerCAmelCase__ = v[0] for i in range(1 , len(_lowerCamelCase ) ): if v[i] < tail[0]: lowerCAmelCase__ = v[i] elif v[i] > tail[length - 1]: lowerCAmelCase__ = v[i] length += 1 else: lowerCAmelCase__ = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
717
"""simple docstring""" import pprint import requests __lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api" def _UpperCAmelCase ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/today""" ).json() def _UpperCAmelCase ( ): """simple docstring""" return requests.get(API_ENDPOINT_URL + """/random""" ).json() if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = random_quotes() pprint.pprint(response)
674
0
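The binary-search helper in the sample above implements the patience-sorting longest-increasing-subsequence algorithm in O(n log n). An equivalent sketch using `bisect` in place of the hand-rolled ceil-index search:

from bisect import bisect_left

def lis_length(v: list[int]) -> int:
    tails: list[int] = []  # tails[k] = smallest tail of an increasing run of length k + 1
    for x in v:
        i = bisect_left(tails, x)  # first position with tails[i] >= x
        if i == len(tails):
            tails.append(x)        # x extends the longest run found so far
        else:
            tails[i] = x           # x gives a smaller tail for runs of length i + 1
    return len(tails)

# Longest increasing subsequence is 2, 3, 7, 8, 10, 13.
assert lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
assert lis_length([]) == 0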
"""simple docstring""" from math import factorial def _UpperCAmelCase ( lowerCamelCase__ = 20 ): """simple docstring""" lowerCAmelCase__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... lowerCAmelCase__ = n // 2 return int(factorial(a_ ) / (factorial(a_ ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: __lowerCAmelCase : List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print("Invalid entry - please enter a number.")
718
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = 0 def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = CLIPConfig() # Create a dummy config file with image_proceesor_type lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ).to_dict() config_dict.pop("""image_processor_type""" ) lowerCAmelCase__ = CLIPImageProcessor(**snake_case__ ) # save in new folder model_config.save_pretrained(snake_case__ ) config.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) # make sure private variable is not incorrectly saved lowerCAmelCase__ = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": 
"""CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): with self.assertRaisesRegex( snake_case__ , """clip-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""clip-base""" ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): with self.assertRaisesRegex( snake_case__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , revision="""aaaaaa""" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): with self.assertRaisesRegex( snake_case__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(snake_case__ ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(snake_case__ ): lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ , trust_remote_code=snake_case__ ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def _SCREAMING_SNAKE_CASE ( self : Dict ): try: AutoConfig.register("""custom""" , snake_case__ ) AutoImageProcessor.register(snake_case__ , snake_case__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(snake_case__ ): AutoImageProcessor.register(snake_case__ , snake_case__ ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase__ = Path(snake_case__ ) / """preprocessor_config.json""" lowerCAmelCase__ = Path(snake_case__ ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(snake_case__ , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(snake_case__ , """w""" ) ) lowerCAmelCase__ = CustomImageProcessor.from_pretrained(snake_case__ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(snake_case__ ) lowerCAmelCase__ = AutoImageProcessor.from_pretrained(snake_case__ ) self.assertIsInstance(snake_case__ , snake_case__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _SCREAMING_SNAKE_CASE ( self : List[str] ): class a_ ( __UpperCamelCase ): UpperCamelCase_ : Tuple = True try: AutoConfig.register("""custom""" , snake_case__ ) AutoImageProcessor.register(snake_case__ , snake_case__ ) # If remote code is not set, the default is to use local lowerCAmelCase__ = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub lowerCAmelCase__ = AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=snake_case__ ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(snake_case__ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
674
0
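The factorial formula in the sample above evaluates the central binomial coefficient C(2n, n), the middle entry of row 2n of Pascal's triangle. A sketch with a cross-check against `math.comb`:

from math import comb, factorial

def central_binomial(n: int) -> int:
    # C(2n, n) = (2n)! / (n! * n!), kept exact with integer division.
    return factorial(2 * n) // (factorial(n) * factorial(n))

assert central_binomial(4) == comb(8, 4) == 70
assert central_binomial(20) == comb(40, 20) == 137846528820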
"""simple docstring""" import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __lowerCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name def _UpperCAmelCase ( lowerCamelCase__ ) -> int: """simple docstring""" warnings.warn( """The preprocess method is deprecated and will be removed in a future version. Please""" """ use VaeImageProcessor.preprocess instead""" , _A , ) if isinstance(_A , torch.Tensor ): return image elif isinstance(_A , PIL.Image.Image ): lowerCAmelCase__ = [image] if isinstance(image[0] , PIL.Image.Image ): lowerCAmelCase__ , lowerCAmelCase__ = image[0].size lowerCAmelCase__ , lowerCAmelCase__ = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 lowerCAmelCase__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image] lowerCAmelCase__ = np.concatenate(_A , axis=0 ) lowerCAmelCase__ = np.array(_A ).astype(np.floataa ) / 2_55.0 lowerCAmelCase__ = image.transpose(0 , 3 , 1 , 2 ) lowerCAmelCase__ = 2.0 * image - 1.0 lowerCAmelCase__ = torch.from_numpy(_A ) elif isinstance(image[0] , torch.Tensor ): lowerCAmelCase__ = torch.cat(_A , dim=0 ) return image def _UpperCAmelCase ( lowerCamelCase__ ) -> Tuple: """simple docstring""" if isinstance(_A , torch.Tensor ): return mask elif isinstance(_A , PIL.Image.Image ): lowerCAmelCase__ = [mask] if isinstance(mask[0] , PIL.Image.Image ): lowerCAmelCase__ , lowerCAmelCase__ = mask[0].size lowerCAmelCase__ , lowerCAmelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowerCAmelCase__ = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask] lowerCAmelCase__ = np.concatenate(_A , axis=0 ) lowerCAmelCase__ = mask.astype(np.floataa ) / 2_55.0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 1 lowerCAmelCase__ = torch.from_numpy(_A ) elif isinstance(mask[0] , torch.Tensor ): lowerCAmelCase__ = torch.cat(_A , dim=0 ) return mask class a_ ( _UpperCAmelCase ): UpperCamelCase_ : UNetaDModel UpperCamelCase_ : RePaintScheduler def __init__( self : List[str] , snake_case__ : Optional[int] , snake_case__ : str ): super().__init__() self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase ) @torch.no_grad() def __call__( self : Any , snake_case__ : Union[torch.Tensor, PIL.Image.Image] , snake_case__ : Union[torch.Tensor, PIL.Image.Image] , snake_case__ : int = 250 , snake_case__ : float = 0.0 , snake_case__ : int = 10 , snake_case__ : int = 10 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ): lowerCAmelCase__ = image lowerCAmelCase__ = _preprocess_image(__UpperCamelCase ) lowerCAmelCase__ = original_image.to(device=self.device , dtype=self.unet.dtype ) lowerCAmelCase__ = _preprocess_mask(__UpperCamelCase ) lowerCAmelCase__ = mask_image.to(device=self.device , dtype=self.unet.dtype ) lowerCAmelCase__ = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size: raise ValueError( F"""You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch""" F""" size of {batch_size}. 
Make sure the batch size matches the length of the generators.""" ) lowerCAmelCase__ = original_image.shape lowerCAmelCase__ = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.device ) lowerCAmelCase__ = eta lowerCAmelCase__ = self.scheduler.timesteps[0] + 1 lowerCAmelCase__ = generator[0] if isinstance(__UpperCamelCase , __UpperCamelCase ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual lowerCAmelCase__ = self.unet(__UpperCamelCase , __UpperCamelCase ).sample # compute previous image: x_t -> x_t-1 lowerCAmelCase__ = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample else: # compute the reverse: x_t-1 -> x_t lowerCAmelCase__ = self.scheduler.undo_step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) lowerCAmelCase__ = t lowerCAmelCase__ = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase__ = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
719
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class a_ : def __init__( self : Optional[int] , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=64 , snake_case__ : Any=None ): lowerCAmelCase__ = np.random.default_rng(snake_case__ ) lowerCAmelCase__ = length lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa ) lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Optional[Any] ): return self.length def __getitem__( self : List[str] , snake_case__ : Optional[int] ): return {"x": self.x[i], "y": self.y[i]} class a_ ( torch.nn.Module ): def __init__( self : List[str] , snake_case__ : str=0 , snake_case__ : Dict=0 , snake_case__ : Any=False ): super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) lowerCAmelCase__ = True def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Any=None ): if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a[0] + self.b[0] class a_ ( torch.nn.Module ): def __init__( self : Any , snake_case__ : Union[str, Any]=0 , snake_case__ : Union[str, Any]=0 , snake_case__ : List[Any]=False ): super().__init__() lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(snake_case__ ).float() ) lowerCAmelCase__ = True def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any]=None ): if self.first_batch: print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" ) lowerCAmelCase__ = False return x * self.a + self.b def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 16 ): """simple docstring""" from datasets import load_dataset from transformers import AutoTokenizer lowerCAmelCase__ = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase__ = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""} lowerCAmelCase__ = load_dataset("""csv""" , data_files=lowerCamelCase__ ) lowerCAmelCase__ = datasets["""train"""].unique("""label""" ) lowerCAmelCase__ = {v: i for i, v in enumerate(lowerCamelCase__ )} def tokenize_function(lowerCamelCase__ ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ = tokenizer( examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , padding="""max_length""" ) if "label" in examples: lowerCAmelCase__ = [label_to_id[l] for l in examples["""label"""]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ = datasets.map( lowerCamelCase__ , batched=lowerCamelCase__ , remove_columns=["""sentence1""", """sentence2""", """label"""] , ) def collate_fn(lowerCamelCase__ ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(lowerCamelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowerCAmelCase__ = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=2 ) lowerCAmelCase__ = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCamelCase__ , collate_fn=lowerCamelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
674
0
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder __lowerCAmelCase : Tuple = """__DUMMY_TRANSFORMERS_USER__""" __lowerCAmelCase : Dict = """Dummy User""" __lowerCAmelCase : Union[str, Any] = """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""" __lowerCAmelCase : Union[str, Any] = """https://hub-ci.huggingface.co""" __lowerCAmelCase : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" __lowerCAmelCase : Union[str, Any] = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" __lowerCAmelCase : List[Any] = Path("~/.huggingface/hub_ci_token").expanduser() @pytest.fixture def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , lowerCamelCase__ ) @pytest.fixture def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , lowerCamelCase__ ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , lowerCamelCase__ ) @pytest.fixture def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , lowerCamelCase__ ) @pytest.fixture def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" HfFolder.save_token(lowerCamelCase__ ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def _UpperCAmelCase ( ): """simple docstring""" return HfApi(endpoint=lowerCamelCase__ ) @pytest.fixture(scope="""session""" ) def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" A = HfFolder.get_token() HfFolder.save_token(lowerCamelCase__ ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(lowerCamelCase__ ) @pytest.fixture def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" def _cleanup_repo(lowerCamelCase__ ): hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" @contextmanager def _temporary_repo(lowerCamelCase__ ): try: yield repo_id finally: cleanup_repo(lowerCamelCase__ ) return _temporary_repo @pytest.fixture(scope="""session""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" A = f"""repo_txt_data-{int(time.time() * 10e3 )}""" A = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" , private=lowerCamelCase__ ) hf_api.upload_file( token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="""data/text_data.txt""" , repo_id=lowerCamelCase__ , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" A = f"""repo_zipped_txt_data-{int(time.time() * 10e3 )}""" A = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" , 
private=lowerCamelCase__ ) hf_api.upload_file( token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="""data.zip""" , repo_id=lowerCamelCase__ , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" A = f"""repo_zipped_img_data-{int(time.time() * 10e3 )}""" A = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" , private=lowerCamelCase__ ) hf_api.upload_file( token=lowerCamelCase__ , path_or_fileobj=str(lowerCamelCase__ ) , path_in_repo="""data.zip""" , repo_id=lowerCamelCase__ , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(lowerCamelCase__ , token=lowerCamelCase__ , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" return hf_private_dataset_repo_zipped_img_data_
720
"""simple docstring""" import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = MobileBertConfig.from_json_file(lowerCamelCase__ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowerCAmelCase__ = MobileBertForPreTraining(lowerCamelCase__ ) # Load weights from tf checkpoint lowerCAmelCase__ = load_tf_weights_in_mobilebert(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __lowerCAmelCase : Optional[int] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
674
0
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCAmelCase : str = ['''model.decoder.embed_positions.weights'''] def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" if "emb" in name: lowerCAmelCase__ = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: lowerCAmelCase__ = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: lowerCAmelCase__ = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: lowerCAmelCase__ = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: lowerCAmelCase__ = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: lowerCAmelCase__ = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: lowerCAmelCase__ = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: lowerCAmelCase__ = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: lowerCAmelCase__ = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: lowerCAmelCase__ = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: lowerCAmelCase__ = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = list(state_dict.keys() ) lowerCAmelCase__ = {} for key in keys: lowerCAmelCase__ = state_dict.pop(__UpperCamelCase ) lowerCAmelCase__ = rename_keys(__UpperCamelCase ) if "in_proj_weight" in key: # split fused qkv proj lowerCAmelCase__ = val[:hidden_size, :] lowerCAmelCase__ = val[hidden_size : 2 * hidden_size, :] lowerCAmelCase__ = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: lowerCAmelCase__ = val else: lowerCAmelCase__ = val return state_dict, enc_dec_proj_state_dict def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" if checkpoint == "small": # default config values lowerCAmelCase__ = 1024 lowerCAmelCase__ = 24 lowerCAmelCase__ = 16 elif checkpoint == "medium": lowerCAmelCase__ = 1536 lowerCAmelCase__ = 48 lowerCAmelCase__ = 24 elif checkpoint == "large": lowerCAmelCase__ = 2048 lowerCAmelCase__ = 48 lowerCAmelCase__ = 32 else: raise ValueError(f"""Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.""" ) lowerCAmelCase__ = MusicgenDecoderConfig( hidden_size=__UpperCamelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__UpperCamelCase , num_attention_heads=__UpperCamelCase , ) return config @torch.no_grad() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="cpu" ): """simple docstring""" lowerCAmelCase__ = MusicGen.get_pretrained(__UpperCamelCase , device=__UpperCamelCase ) lowerCAmelCase__ = decoder_config_from_checkpoint(__UpperCamelCase ) lowerCAmelCase__ = fairseq_model.lm.state_dict() lowerCAmelCase__ = rename_state_dict( __UpperCamelCase , 
hidden_size=decoder_config.hidden_size ) lowerCAmelCase__ = TaEncoderModel.from_pretrained("""t5-base""" ) lowerCAmelCase__ = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) lowerCAmelCase__ = MusicgenForCausalLM(__UpperCamelCase ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection lowerCAmelCase__ = decoder.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__UpperCamelCase ) > 0: raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model lowerCAmelCase__ = MusicgenForConditionalGeneration(text_encoder=__UpperCamelCase , audio_encoder=__UpperCamelCase , decoder=__UpperCamelCase ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__UpperCamelCase ) # check we can do a forward pass lowerCAmelCase__ = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) lowerCAmelCase__ = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): lowerCAmelCase__ = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase ).logits if logits.shape != (8, 1, 2048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor lowerCAmelCase__ = AutoTokenizer.from_pretrained("""t5-base""" ) lowerCAmelCase__ = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) lowerCAmelCase__ = MusicgenProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase ) # set the appropriate bos/pad token ids lowerCAmelCase__ = 2048 lowerCAmelCase__ = 2048 # set other default generation config params lowerCAmelCase__ = int(30 * audio_encoder.config.frame_rate ) lowerCAmelCase__ = True lowerCAmelCase__ = 3.0 if pytorch_dump_folder is not None: Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) if repo_id: logger.info(f"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__UpperCamelCase ) processor.push_to_hub(__UpperCamelCase ) if __name__ == "__main__": __lowerCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) __lowerCAmelCase : List[str] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
721
"""simple docstring""" def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), f"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: lowerCAmelCase__ = f"""The input value of [n={number}] has to be > 0""" raise ValueError(lowerCamelCase__ ) else: lowerCAmelCase__ = sylvester(number - 1 ) lowerCAmelCase__ = num - 1 lowerCAmelCase__ = num return lower * upper + 1 if __name__ == "__main__": print(F"The 8th number in Sylvester's sequence: {sylvester(8)}")
674
0
"""simple docstring""" import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class a_ ( unittest.TestCase ): @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) lowerCAmelCase__ = AutoTokenizer.from_pretrained("""xlm-roberta-base""" ) lowerCAmelCase__ = """The dog is cute and lives in the garden house""" lowerCAmelCase__ = jnp.array([tokenizer.encode(A_ )] ) lowerCAmelCase__ = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim lowerCAmelCase__ = jnp.array( [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] ) lowerCAmelCase__ = model(A_ )["""last_hidden_state"""] self.assertEqual(output.shape , A_ ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , A_ , atol=1E-3 ) )
700
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Tuple = PegasusTokenizer UpperCamelCase_ : Any = PegasusTokenizerFast UpperCamelCase_ : int = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return PegasusTokenizer.from_pretrained("""google/pegasus-large""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = """</s>""" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """</s>""" ) self.assertEqual(vocab_keys[-1] , """v""" ) self.assertEqual(len(snake_case__ ) , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ = 
"""To ensure a smooth flow of bank resolutions.""" lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 150, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : str ): # fmt: off lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = PegasusTokenizer UpperCamelCase_ : Optional[int] = PegasusTokenizerFast UpperCamelCase_ : Union[str, Any] = True UpperCamelCase_ : Optional[int] = True def _SCREAMING_SNAKE_CASE ( 
self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ): return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids self.assertListEqual( snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
674
0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __lowerCAmelCase : Tuple = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class a_ ( unittest.TestCase ): UpperCamelCase_ : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCamelCase_ : Optional[int] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: UpperCamelCase_ : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: UpperCamelCase_ : int = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" ) lowerCAmelCase__ = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) lowerCAmelCase__ = text_classifier("""This is great !""" , top_k=2 ) self.assertEqual( nested_simplify(snake_case__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] ) lowerCAmelCase__ = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 ) self.assertEqual( nested_simplify(snake_case__ ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) lowerCAmelCase__ = text_classifier("""This is great !""" , top_k=1 ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) # Legacy behavior lowerCAmelCase__ = text_classifier("""This is great !""" , return_all_scores=snake_case__ ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) lowerCAmelCase__ = text_classifier("""This is great !""" , return_all_scores=snake_case__ ) self.assertEqual( nested_simplify(snake_case__ ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] ) lowerCAmelCase__ = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=snake_case__ ) self.assertEqual( nested_simplify(snake_case__ ) , [ [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}], ] , ) lowerCAmelCase__ = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=snake_case__ ) self.assertEqual( nested_simplify(snake_case__ ) , [ {"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_0""", """score""": 0.504}, ] , ) @require_torch def _SCREAMING_SNAKE_CASE ( self : str ): import torch lowerCAmelCase__ = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , ) lowerCAmelCase__ = text_classifier("""This is great !""" ) 
self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @require_tf def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" ) lowerCAmelCase__ = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """LABEL_0""", """score""": 0.504}] ) @slow @require_torch def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = pipeline("""text-classification""" ) lowerCAmelCase__ = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) lowerCAmelCase__ = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) lowerCAmelCase__ = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) @slow @require_tf def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ = pipeline("""text-classification""" , framework="""tf""" ) lowerCAmelCase__ = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) lowerCAmelCase__ = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) lowerCAmelCase__ = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": """POSITIVE""", """score""": 0.988}] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[str] ): lowerCAmelCase__ = TextClassificationPipeline(model=snake_case__ , tokenizer=snake_case__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Dict ): lowerCAmelCase__ = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 lowerCAmelCase__ = """HuggingFace is in""" lowerCAmelCase__ = text_classifier(snake_case__ ) self.assertEqual(nested_simplify(snake_case__ ) , [{"""label""": ANY(snake_case__ ), """score""": ANY(snake_case__ )}] ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) lowerCAmelCase__ = ["""HuggingFace is in """, """Paris is in France"""] lowerCAmelCase__ = text_classifier(snake_case__ ) self.assertEqual( nested_simplify(snake_case__ ) , [{"""label""": ANY(snake_case__ ), """score""": ANY(snake_case__ )}, {"""label""": ANY(snake_case__ ), """score""": ANY(snake_case__ )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format lowerCAmelCase__ = text_classifier(snake_case__ , top_k=snake_case__ ) lowerCAmelCase__ = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(snake_case__ ) , [[{"""label""": ANY(snake_case__ ), """score""": ANY(snake_case__ )}] * N, [{"""label""": ANY(snake_case__ ), """score""": ANY(snake_case__ )}] * N] , ) lowerCAmelCase__ = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""} lowerCAmelCase__ = text_classifier(snake_case__ ) 
self.assertEqual( nested_simplify(snake_case__ ) , {"""label""": ANY(snake_case__ ), """score""": ANY(snake_case__ )} , ) self.assertTrue(outputs["""label"""] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. lowerCAmelCase__ = [["""HuggingFace is in """, """Paris is in France"""]] with self.assertRaises(snake_case__ ): text_classifier(snake_case__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility lowerCAmelCase__ = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] ) self.assertEqual( nested_simplify(snake_case__ ) , [{"""label""": ANY(snake_case__ ), """score""": ANY(snake_case__ )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
701
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class a_ ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : int ): if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=snake_case__ , ) assert hasattr(self , """env""" ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ): # configuration for running training on smdistributed Model Parallel lowerCAmelCase__ = { """enabled""": True, """processes_per_host""": 8, } lowerCAmelCase__ = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ): TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ): # create estimator lowerCAmelCase__ = self.create_estimator(snake_case__ ) # run training estimator.fit() # result dataframe lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] 
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
674
0
"""simple docstring""" import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __lowerCAmelCase = logging.getLogger(__name__) __lowerCAmelCase = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) __lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class a_ : UpperCamelCase_ : int = field( default=__UpperCamelCase , metadata={ "help": ( "The model checkpoint for weights initialization. Leave None if you want to train a model from" " scratch." ) } , ) UpperCamelCase_ : int = field( default=__UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__UpperCamelCase )} , ) UpperCamelCase_ : Dict = field( default=__UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCamelCase_ : Any = field( default=__UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCamelCase_ : Optional[Any] = field( default=__UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class a_ : UpperCamelCase_ : Union[str, Any] = field( default=__UpperCamelCase , metadata={"help": "The input training data file (a text file)."} ) UpperCamelCase_ : Optional[int] = field( default=__UpperCamelCase , metadata={ "help": ( "The input training data files (multiple files in glob format). " "Very often splitting large files to smaller files can prevent tokenizer going out of memory" ) } , ) UpperCamelCase_ : List[str] = field( default=__UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) UpperCamelCase_ : List[Any] = field( default=__UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , ) UpperCamelCase_ : Optional[int] = field( default=__UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , ) UpperCamelCase_ : Any = field( default=__UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , ) UpperCamelCase_ : List[Any] = field( default=__UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} ) UpperCamelCase_ : Union[str, Any] = field(default=__UpperCamelCase , metadata={"help": "Whether ot not to use whole word mask."} ) UpperCamelCase_ : Optional[Any] = field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) UpperCamelCase_ : Any = field( default=1 / 6 , metadata={ "help": ( "Ratio of length of a span of masked tokens to surrounding context length for permutation language" " modeling." 
) } , ) UpperCamelCase_ : int = field( default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} ) UpperCamelCase_ : List[str] = field( default=-1 , metadata={ "help": ( "Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." "Default to the model max input length for single sentence inputs (take into account special tokens)." ) } , ) UpperCamelCase_ : List[Any] = field( default=__UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False , lowerCamelCase__ = None , ): """simple docstring""" def _dataset(lowerCamelCase__ , lowerCamelCase__=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" ) return LineByLineWithRefDataset( tokenizer=lowerCamelCase__ , file_path=lowerCamelCase__ , block_size=args.block_size , ref_path=lowerCamelCase__ , ) return LineByLineTextDataset(tokenizer=lowerCamelCase__ , file_path=lowerCamelCase__ , block_size=args.block_size ) else: return TextDataset( tokenizer=lowerCamelCase__ , file_path=lowerCamelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=lowerCamelCase__ , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(lowerCamelCase__ ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( """Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """ """or remove the --do_eval argument.""" ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" """ --overwrite_output_dir to overcome.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , lowerCamelCase__ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if model_args.config_name: lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: lowerCAmelCase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: lowerCAmelCase__ = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.tokenizer_name: lowerCAmelCase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: lowerCAmelCase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another""" """ script, save it,and load it from here, using --tokenizer_name""" ) if model_args.model_name_or_path: lowerCAmelCase__ = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , ) else: logger.info("""Training new model from scratch""" ) lowerCAmelCase__ = AutoModelWithLMHead.from_config(lowerCamelCase__ ) model.resize_token_embeddings(len(lowerCamelCase__ ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( """BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the""" """--mlm flag (masked language modeling).""" ) if data_args.block_size <= 0: lowerCAmelCase__ = tokenizer.max_len # Our input block size will be the max possible for the model else: lowerCAmelCase__ = min(data_args.block_size , tokenizer.max_len ) # Get datasets lowerCAmelCase__ = ( get_dataset(lowerCamelCase__ , tokenizer=lowerCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) lowerCAmelCase__ = ( get_dataset(lowerCamelCase__ , tokenizer=lowerCamelCase__ , evaluate=lowerCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": lowerCAmelCase__ = DataCollatorForPermutationLanguageModeling( tokenizer=lowerCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: lowerCAmelCase__ = DataCollatorForWholeWordMask( tokenizer=lowerCamelCase__ , mlm_probability=data_args.mlm_probability ) else: lowerCAmelCase__ = DataCollatorForLanguageModeling( tokenizer=lowerCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer lowerCAmelCase__ = Trainer( model=lowerCamelCase__ , args=lowerCamelCase__ , data_collator=lowerCamelCase__ , train_dataset=lowerCamelCase__ , eval_dataset=lowerCamelCase__ , prediction_loss_only=lowerCamelCase__ , ) # Training if training_args.do_train: lowerCAmelCase__ = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=lowerCamelCase__ ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowerCAmelCase__ = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowerCAmelCase__ = 
trainer.evaluate() lowerCAmelCase__ = math.exp(eval_output["""eval_loss"""] ) lowerCAmelCase__ = {"""perplexity""": perplexity} lowerCAmelCase__ = os.path.join(training_args.output_dir , """eval_results_lm.txt""" ) if trainer.is_world_master(): with open(lowerCamelCase__ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , lowerCamelCase__ , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) results.update(lowerCamelCase__ ) return results def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" main() if __name__ == "__main__": main()
702
"""simple docstring""" from math import pi, sqrt def _UpperCAmelCase ( lowerCamelCase__ ): """simple docstring""" if num <= 0: raise ValueError("""math domain error""" ) if num > 1_71.5: raise OverflowError("""math range error""" ) elif num - int(lowerCamelCase__ ) not in (0, 0.5): raise NotImplementedError("""num must be an integer or a half-integer""" ) elif num == 0.5: return sqrt(lowerCamelCase__ ) else: return 1.0 if num == 1 else (num - 1) * gamma(num - 1 ) def _UpperCAmelCase ( ): """simple docstring""" assert gamma(0.5 ) == sqrt(lowerCamelCase__ ) assert gamma(1 ) == 1.0 assert gamma(2 ) == 1.0 if __name__ == "__main__": from doctest import testmod testmod() __lowerCAmelCase : Dict = 1.0 while num: __lowerCAmelCase : Any = float(input("Gamma of: ")) print(F"gamma({num}) = {gamma(num)}") print("\nEnter 0 to exit...")
674
0
"""simple docstring""" class a_ : def __init__( self : Tuple ): lowerCAmelCase__ = {} def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): print(self.vertex ) for i in self.vertex: print(__A , """ -> """ , """ -> """.join([str(__A ) for j in self.vertex[i]] ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : int , snake_case__ : int ): # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(__A ) else: # else make a new vertex lowerCAmelCase__ = [to_vertex] def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): # visited array for storing already visited nodes lowerCAmelCase__ = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__A , __A ) def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : int , snake_case__ : list ): # mark start vertex as visited lowerCAmelCase__ = True print(__A , end=""" """ ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__A , __A ) if __name__ == "__main__": __lowerCAmelCase : str = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("DFS:") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
703
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class a_ : def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=13 , snake_case__ : int=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=True , snake_case__ : Optional[int]=True , snake_case__ : List[Any]=32 , snake_case__ : List[str]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : Optional[int]=37 , snake_case__ : Tuple="gelu" , snake_case__ : str=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=10 , snake_case__ : Dict=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : str=None , snake_case__ : List[Any]=2 , ): lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = patch_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = is_training lowerCAmelCase__ = use_labels lowerCAmelCase__ = hidden_size lowerCAmelCase__ = num_hidden_layers lowerCAmelCase__ = num_attention_heads lowerCAmelCase__ = intermediate_size lowerCAmelCase__ = hidden_act lowerCAmelCase__ = hidden_dropout_prob lowerCAmelCase__ = attention_probs_dropout_prob lowerCAmelCase__ = type_sequence_label_size lowerCAmelCase__ = initializer_range lowerCAmelCase__ = scope lowerCAmelCase__ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCAmelCase__ = (image_size // patch_size) ** 2 lowerCAmelCase__ = num_patches + 2 def _SCREAMING_SNAKE_CASE ( self : Any ): lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = None if self.use_labels: lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[Any] ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ): lowerCAmelCase__ = TFDeiTModel(config=snake_case__ ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple 
, snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict ): lowerCAmelCase__ = TFDeiTForMaskedImageModeling(config=snake_case__ ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = TFDeiTForMaskedImageModeling(snake_case__ ) lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(snake_case__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Tuple ): lowerCAmelCase__ = self.type_sequence_label_size lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ ) lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase__ = 1 lowerCAmelCase__ = TFDeiTForImageClassification(snake_case__ ) lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class a_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Optional[Any] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) UpperCamelCase_ : Any = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Optional[Any] = False UpperCamelCase_ : Optional[int] = False UpperCamelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = TFDeiTModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def _SCREAMING_SNAKE_CASE ( self : Any ): pass def _SCREAMING_SNAKE_CASE ( self : str ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCAmelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , tf.keras.layers.Dense ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(snake_case__ ) lowerCAmelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] 
lowerCAmelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=False ): lowerCAmelCase__ = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def _SCREAMING_SNAKE_CASE ( self : Any ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ = TFDeiTModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def _UpperCAmelCase ( ): """simple docstring""" lowerCAmelCase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class a_ ( unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Any ): return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=snake_case__ , return_tensors="""tf""" ) # forward pass lowerCAmelCase__ = model(**snake_case__ ) # verify the logits lowerCAmelCase__ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) lowerCAmelCase__ = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
674
0
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class a_ ( lowercase__ ): '''simple docstring''' UpperCamelCase_ : int = 42 UpperCamelCase_ : str = 42 class a_ ( lowercase__ , lowercase__ ): '''simple docstring''' UpperCamelCase_ : str = 1 @register_to_config def __init__( self : Dict , snake_case__ : int = 2000 , snake_case__ : float = 0.15 , snake_case__ : float = 0.01 , snake_case__ : float = 1348.0 , snake_case__ : float = 1E-5 , snake_case__ : int = 1 , ): # standard deviation of the initial noise distribution lowerCAmelCase__ = sigma_max # setable values lowerCAmelCase__ = None self.set_sigmas(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : torch.FloatTensor , snake_case__ : Optional[int] = None ): return sample def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : int , snake_case__ : float = None , snake_case__ : Union[str, torch.device] = None ): lowerCAmelCase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps lowerCAmelCase__ = torch.linspace(1 , snake_case__ , snake_case__ , device=snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : int , snake_case__ : float = None , snake_case__ : float = None , snake_case__ : float = None ): lowerCAmelCase__ = sigma_min if sigma_min is not None else self.config.sigma_min lowerCAmelCase__ = sigma_max if sigma_max is not None else self.config.sigma_max lowerCAmelCase__ = sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(snake_case__ , snake_case__ ) lowerCAmelCase__ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) lowerCAmelCase__ = torch.exp(torch.linspace(math.log(snake_case__ ) , math.log(snake_case__ ) , snake_case__ ) ) lowerCAmelCase__ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : str , snake_case__ : Tuple ): return torch.where( timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Optional[torch.Generator] = None , snake_case__ : bool = True , ): if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) lowerCAmelCase__ = timestep * torch.ones( sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) lowerCAmelCase__ = (timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda lowerCAmelCase__ = timesteps.to(self.discrete_sigmas.device ) lowerCAmelCase__ = self.discrete_sigmas[timesteps].to(sample.device ) lowerCAmelCase__ = self.get_adjacent_sigma(snake_case__ , snake_case__ ).to(sample.device ) lowerCAmelCase__ = torch.zeros_like(snake_case__ ) lowerCAmelCase__ = (sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output 
modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods lowerCAmelCase__ = diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): lowerCAmelCase__ = diffusion.unsqueeze(-1 ) lowerCAmelCase__ = drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of lowerCAmelCase__ = randn_tensor( sample.shape , layout=sample.layout , generator=snake_case__ , device=sample.device , dtype=sample.dtype ) lowerCAmelCase__ = sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? lowerCAmelCase__ = prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=snake_case__ , prev_sample_mean=snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , snake_case__ : Optional[torch.Generator] = None , snake_case__ : bool = True , ): if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction lowerCAmelCase__ = randn_tensor(sample.shape , layout=sample.layout , generator=snake_case__ ).to(sample.device ) # compute step size from the model_output, the noise, and the snr lowerCAmelCase__ = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean() lowerCAmelCase__ = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean() lowerCAmelCase__ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2 lowerCAmelCase__ = step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term lowerCAmelCase__ = step_size.flatten() while len(step_size.shape ) < len(sample.shape ): lowerCAmelCase__ = step_size.unsqueeze(-1 ) lowerCAmelCase__ = sample + step_size * model_output lowerCAmelCase__ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , snake_case__ : torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples lowerCAmelCase__ = timesteps.to(original_samples.device ) lowerCAmelCase__ = self.discrete_sigmas.to(original_samples.device )[timesteps] lowerCAmelCase__ = ( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(snake_case__ ) * sigmas[:, None, None, None] ) lowerCAmelCase__ = noise + original_samples return noisy_samples def __len__( self : Dict ): return self.config.num_train_timesteps
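# --- editor's note (added, not part of the original sample): the two update
# rules above follow the predictor-corrector sampling scheme for variance-
# exploding score SDEs (Song et al., "Score-Based Generative Modeling through
# Stochastic Differential Equations", 2021). In the un-mangled diffusers
# scheduler these methods are `step_pred` (reverse-time Euler-Maruyama
# predictor) and `step_correct` (annealed Langevin-dynamics corrector whose
# step size is derived from the configured signal-to-noise ratio `snr`).
# A sketch of one sampling iteration under those assumed names:
#
#   score = model(sample, sigma_t)                               # estimate of grad_x log p_t(x)
#   sample = scheduler.step_correct(score, sample).prev_sample   # Langevin corrector
#   score = model(sample, sigma_t)
#   sample = scheduler.step_pred(score, t, sample).prev_sample   # reverse-SDE predictor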
704
"""simple docstring""" from __future__ import annotations from math import gcd def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ): """simple docstring""" if num < 2: raise ValueError("""The input value cannot be less than 2""" ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int: return (pow(lowerCamelCase__ , 2 ) + step) % modulus for _ in range(lowerCamelCase__ ): # These track the position within the cycle detection logic. lowerCAmelCase__ = seed lowerCAmelCase__ = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowerCAmelCase__ = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) __lowerCAmelCase : List[str] = parser.parse_args() __lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"{args.num} is probably prime") else: __lowerCAmelCase : List[str] = args.num // divisor print(F"{args.num} = {divisor} * {quotient}")
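# --- editor's note (added, not part of the original sample): a usage sketch,
# assuming the mangled `_UpperCAmelCase` above is `pollard_rho` (the __main__
# block still calls it by that name). Against the canonical implementation:
#
#   pollard_rho(8051)   # -> a nontrivial factor of 8051 = 83 * 97, when an attempt succeeds
#   pollard_rho(10403)  # -> 101 or 103 (10403 = 101 * 103), typically within a few attempts
#   pollard_rho(13)     # -> None when every attempt fails, e.g. because 13 is prime
#
# Success is probabilistic per attempt; raising `attempts` retries with a new
# seed (the hare's last position) and an incremented step constant.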
674
0
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def _UpperCAmelCase ( lowerCAmelCase_ ): """simple docstring""" if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X20000 and cp <= 0X2a6df) # or (cp >= 0X2a700 and cp <= 0X2b73f) # or (cp >= 0X2b740 and cp <= 0X2b81f) # or (cp >= 0X2b820 and cp <= 0X2ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2f800 and cp <= 0X2fa1f) # ): # return True return False def _UpperCAmelCase ( lowerCAmelCase_ ): """simple docstring""" for char in word: lowerCAmelCase__ = ord(lowerCAmelCase_ ) if not _is_chinese_char(lowerCAmelCase_ ): return 0 return 1 def _UpperCAmelCase ( lowerCAmelCase_ ): """simple docstring""" lowerCAmelCase__ = set() for token in tokens: lowerCAmelCase__ = len(lowerCAmelCase_ ) > 1 and is_chinese(lowerCAmelCase_ ) if chinese_word: word_set.add(lowerCAmelCase_ ) lowerCAmelCase__ = list(lowerCAmelCase_ ) return word_list def _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if not chinese_word_set: return bert_tokens lowerCAmelCase__ = max([len(lowerCAmelCase_ ) for w in chinese_word_set] ) lowerCAmelCase__ = bert_tokens lowerCAmelCase__ = 0, len(lowerCAmelCase_ ) while start < end: lowerCAmelCase__ = True if is_chinese(bert_word[start] ): lowerCAmelCase__ = min(end - start , lowerCAmelCase_ ) for i in range(lowerCAmelCase_ , 1 , -1 ): lowerCAmelCase__ = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase__ = """##""" + bert_word[j] lowerCAmelCase__ = start + i lowerCAmelCase__ = False break if single_word: start += 1 return bert_word def _UpperCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" lowerCAmelCase__ = [] for i in range(0 , len(lowerCAmelCase_ ) , 100 ): lowerCAmelCase__ = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""] ).cws lowerCAmelCase__ = [get_chinese_word(lowerCAmelCase_ ) for r in res] ltp_res.extend(lowerCAmelCase_ ) assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) lowerCAmelCase__ = [] for i in range(0 , len(lowerCAmelCase_ ) , 100 ): lowerCAmelCase__ = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) lowerCAmelCase__ = [] for input_ids, chinese_word in zip(lowerCAmelCase_ , lowerCAmelCase_ ): lowerCAmelCase__ = [] for id in input_ids: lowerCAmelCase__ = bert_tokenizer._convert_id_to_token(lowerCAmelCase_ ) input_tokens.append(lowerCAmelCase_ ) lowerCAmelCase__ = add_sub_symbol(lowerCAmelCase_ , lowerCAmelCase_ ) lowerCAmelCase__ = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowerCAmelCase_ ): if token[:2] == "##": lowerCAmelCase__ = token[2:] # save chinese tokens' pos if len(lowerCAmelCase_ ) == 1 and _is_chinese_char(ord(lowerCAmelCase_ ) ): ref_id.append(lowerCAmelCase_ ) ref_ids.append(lowerCAmelCase_ ) assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ) return ref_ids def _UpperCAmelCase ( lowerCAmelCase_ ): """simple docstring""" with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = [line.strip() for line in data if len(lowerCAmelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase__ = LTP(args.ltp ) # faster in GPU device lowerCAmelCase__ = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase__ = prepare_ref(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: lowerCAmelCase__ = [json.dumps(lowerCAmelCase_ ) + """\n""" for ref in ref_ids] f.writelines(lowerCAmelCase_ ) if __name__ == "__main__": __lowerCAmelCase : List[str] = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) __lowerCAmelCase : List[str] = parser.parse_args() main(args)
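# --- editor's note (added, not part of the original sample): this script emits,
# for each input line, the positions of BERT sub-tokens that sit inside an
# LTP-segmented Chinese word (the ones `add_sub_symbol` prefixed with "##").
# Those reference positions are what `LineByLineWithRefDataset` feeds to
# `DataCollatorForWholeWordMask`, so that masking covers whole Chinese words
# instead of isolated sub-characters.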
705
"""simple docstring"""

import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging

logging.set_verbosity_info()


def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """simple docstring"""
    lowerCAmelCase__ = TapasConfig.from_json_file(lowerCamelCase__ )
    # set absolute/relative position embeddings parameter
    lowerCAmelCase__ = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
    elif task == "WTQ":
        # run_task_main.py hparams
        lowerCAmelCase__ = 4
        lowerCAmelCase__ = True
        # hparam_utils.py hparams
        lowerCAmelCase__ = 0.66_46_94
        lowerCAmelCase__ = 0.20_79_51
        lowerCAmelCase__ = 0.12_11_94
        lowerCAmelCase__ = True
        lowerCAmelCase__ = True
        lowerCAmelCase__ = False
        lowerCAmelCase__ = 0.0_35_25_13
        lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        lowerCAmelCase__ = 4
        lowerCAmelCase__ = False
        # hparam_utils.py hparams
        lowerCAmelCase__ = 36.45_19
        lowerCAmelCase__ = 0.90_34_21
        lowerCAmelCase__ = 2_22.0_88
        lowerCAmelCase__ = True
        lowerCAmelCase__ = True
        lowerCAmelCase__ = True
        lowerCAmelCase__ = 0.76_31_41
        lowerCAmelCase__ = TapasForQuestionAnswering(config=lowerCamelCase__ )
    elif task == "TABFACT":
        lowerCAmelCase__ = TapasForSequenceClassification(config=lowerCamelCase__ )
    elif task == "MLM":
        lowerCAmelCase__ = TapasForMaskedLM(config=lowerCamelCase__ )
    elif task == "INTERMEDIATE_PRETRAINING":
        lowerCAmelCase__ = TapasModel(config=lowerCamelCase__ )
    else:
        raise ValueError(f"""Task {task} not supported.""" )

    print(f"""Building PyTorch model from configuration: {config}""" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )

    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(lowerCamelCase__ )

    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""" )
    lowerCAmelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
    tokenizer.save_pretrained(lowerCamelCase__ )

    print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )


if __name__ == "__main__":
    __lowerCAmelCase : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )

    __lowerCAmelCase : Union[str, Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
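    # Example invocation (hypothetical paths, for illustration only):
    #   python convert_tapas_checkpoint.py --task WTQ --reset_position_index_per_cell \
    #       --tf_checkpoint_path ./tapas/model.ckpt --tapas_config_file ./tapas/config.json \
    #       --pytorch_dump_path ./tapas-wtq-pytorch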
674
0
"""simple docstring"""

from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

# Configuration
__lowerCAmelCase : str = HfArgumentParser(InitializationArguments)
__lowerCAmelCase : str = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
__lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__lowerCAmelCase : str = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
__lowerCAmelCase : int = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
__lowerCAmelCase : Any = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
706
"""simple docstring"""


def _UpperCAmelCase ( lowerCamelCase__ = 50 ):
    """simple docstring"""
    lowerCAmelCase__ = [[0] * 3 for _ in range(length + 1 )]

    # For each tile length L in {2, 3, 4} (one colour per length), the table counts the
    # fillings of an n-unit row that use at least one tile of that length:
    #   ways(n, L) = sum over start positions s of (ways(n - s - L, L) + 1)
    # The final answer sums the three per-colour counts (a Project Euler 116-style count).
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length] )


if __name__ == "__main__":
    print(F"{solution() = }")
674
0
"""simple docstring"""

import copy
import tempfile
import unittest

from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test


class a_ ( unittest.TestCase ):
    @parameterized.expand([(None,), ("""foo.json""",)] )
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : List[Any] ):
        lowerCAmelCase__ = GenerationConfig(
            do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(__lowerCAmelCase , config_name=__lowerCAmelCase )
            lowerCAmelCase__ = GenerationConfig.from_pretrained(__lowerCAmelCase , config_name=__lowerCAmelCase )

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , __lowerCAmelCase )
        self.assertEqual(loaded_config.temperature , 0.7 )
        self.assertEqual(loaded_config.length_penalty , 1.0 )
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50 )
        self.assertEqual(loaded_config.max_length , 20 )
        self.assertEqual(loaded_config.max_time , __lowerCAmelCase )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        lowerCAmelCase__ = AutoConfig.from_pretrained("""gpt2""" )
        lowerCAmelCase__ = GenerationConfig.from_model_config(__lowerCAmelCase )
        lowerCAmelCase__ = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
        lowerCAmelCase__ = GenerationConfig()
        lowerCAmelCase__ = {
            """max_new_tokens""": 1024,
            """foo""": """bar""",
        }
        lowerCAmelCase__ = copy.deepcopy(__lowerCAmelCase )
        lowerCAmelCase__ = generation_config.update(**__lowerCAmelCase )

        # update_kwargs was not modified (no side effects)
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1024 )

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(__lowerCAmelCase , {"""foo""": """bar"""} )

    def _SCREAMING_SNAKE_CASE ( self : str ):
        lowerCAmelCase__ = GenerationConfig()
        lowerCAmelCase__ = """bar"""
        with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
            generation_config.save_pretrained(__lowerCAmelCase )
            lowerCAmelCase__ = GenerationConfig.from_pretrained(__lowerCAmelCase )

            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo , """bar""" )

        lowerCAmelCase__ = GenerationConfig.from_model_config(__lowerCAmelCase )
        assert not hasattr(__lowerCAmelCase , """foo""" )  # no new kwargs should be initialized if from config

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        lowerCAmelCase__ = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0 )
        self.assertEqual(default_config.do_sample , __lowerCAmelCase )
        self.assertEqual(default_config.num_beams , 1 )

        lowerCAmelCase__ = GenerationConfig(
            do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7 )
        self.assertEqual(config.do_sample , __lowerCAmelCase )
        self.assertEqual(config.num_beams , 1 )

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(__lowerCAmelCase )
            lowerCAmelCase__ = GenerationConfig.from_pretrained(__lowerCAmelCase , temperature=1.0 )

        self.assertEqual(loaded_config.temperature , 1.0 )
        self.assertEqual(loaded_config.do_sample , __lowerCAmelCase )
        self.assertEqual(loaded_config.num_beams , 1 )  # default value


@is_staging_test
class a_ ( unittest.TestCase ):
    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ):
        lowerCAmelCase__ = TOKEN
        HfFolder.save_token(__lowerCAmelCase )

    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ):
        try:
            delete_repo(token=cls._token , repo_id="""test-generation-config""" )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
        except HTTPError:
            pass

    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        lowerCAmelCase__ = GenerationConfig(
            do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )

        lowerCAmelCase__ = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )

        # Reset repo
        delete_repo(token=self._token , repo_id="""test-generation-config""" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                __lowerCAmelCase , repo_id="""test-generation-config""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token
            )

        lowerCAmelCase__ = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )

    def _SCREAMING_SNAKE_CASE ( self : Tuple ):
        lowerCAmelCase__ = GenerationConfig(
            do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )

        lowerCAmelCase__ = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )

        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                __lowerCAmelCase , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token
            )

        lowerCAmelCase__ = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
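# Taken together, the cases above cover the save/load round-trip, inheritance from a model
# config, `update()` side effects, persistence of ad-hoc attributes, kwarg overrides at load
# time, and Hub push/pull under both a user namespace and an organization namespace.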
707
"""simple docstring"""

import argparse
import os

import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn

from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging

if version.parse(nlp.__version__) != version.parse("0.8.3"):
    raise Exception("requires gluonnlp == 0.8.3")

if version.parse(mx.__version__) != version.parse("1.5.0"):
    raise Exception("requires mxnet == 1.5.0")

logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)

__lowerCAmelCase : Any = "The Nymphenburg Palace is a beautiful palace in Munich!"


def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    """simple docstring"""
    lowerCAmelCase__ = {
        """attention_cell""": """multi_head""",
        """num_layers""": 4,
        """units""": 1024,
        """hidden_size""": 768,
        """max_length""": 512,
        """num_heads""": 8,
        """scaled""": True,
        """dropout""": 0.1,
        """use_residual""": True,
        """embed_size""": 1024,
        """embed_dropout""": 0.1,
        """word_embed""": None,
        """layer_norm_eps""": 1e-5,
        """token_type_vocab_size""": 2,
    }
    lowerCAmelCase__ = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    lowerCAmelCase__ = BERTEncoder(
        attention_cell=predefined_args["""attention_cell"""] ,
        num_layers=predefined_args["""num_layers"""] ,
        units=predefined_args["""units"""] ,
        hidden_size=predefined_args["""hidden_size"""] ,
        max_length=predefined_args["""max_length"""] ,
        num_heads=predefined_args["""num_heads"""] ,
        scaled=predefined_args["""scaled"""] ,
        dropout=predefined_args["""dropout"""] ,
        output_attention=lowerCamelCase__ ,
        output_all_encodings=lowerCamelCase__ ,
        use_residual=predefined_args["""use_residual"""] ,
        activation=predefined_args.get("""activation""" , """gelu""" ) ,
        layer_norm_eps=predefined_args.get("""layer_norm_eps""" , lowerCamelCase__ ) ,
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    lowerCAmelCase__ = """openwebtext_ccnews_stories_books_cased"""

    # Specify download folder to Gluonnlp's vocab
    lowerCAmelCase__ = os.path.join(get_home_dir() , """models""" )
    lowerCAmelCase__ = _load_vocab(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , cls=lowerCamelCase__ )

    lowerCAmelCase__ = nlp.model.BERTModel(
        lowerCamelCase__ ,
        len(lowerCamelCase__ ) ,
        units=predefined_args["""units"""] ,
        embed_size=predefined_args["""embed_size"""] ,
        embed_dropout=predefined_args["""embed_dropout"""] ,
        word_embed=predefined_args["""word_embed"""] ,
        use_pooler=lowerCamelCase__ ,
        use_token_type_embed=lowerCamelCase__ ,
        token_type_vocab_size=predefined_args["""token_type_vocab_size"""] ,
        use_classifier=lowerCamelCase__ ,
        use_decoder=lowerCamelCase__ ,
    )

    original_bort.load_parameters(lowerCamelCase__ , cast_dtype=lowerCamelCase__ , ignore_extra=lowerCamelCase__ )
    lowerCAmelCase__ = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    lowerCAmelCase__ = {
        """architectures""": ["""BertForMaskedLM"""],
        """attention_probs_dropout_prob""": predefined_args["""dropout"""],
        """hidden_act""": """gelu""",
        """hidden_dropout_prob""": predefined_args["""dropout"""],
        """hidden_size""": predefined_args["""embed_size"""],
        """initializer_range""": 0.02,
        """intermediate_size""": predefined_args["""hidden_size"""],
        """layer_norm_eps""": predefined_args["""layer_norm_eps"""],
        """max_position_embeddings""": predefined_args["""max_length"""],
        """model_type""": """bort""",
        """num_attention_heads""": predefined_args["""num_heads"""],
        """num_hidden_layers""": predefined_args["""num_layers"""],
        """pad_token_id""": 1,  # 2 = BERT, 1 = RoBERTa
        """type_vocab_size""": 1,  # 2 = BERT, 1 = RoBERTa
        """vocab_size""": len(lowerCamelCase__ ),
    }
    lowerCAmelCase__ = BertConfig.from_dict(lowerCamelCase__ )
    lowerCAmelCase__ = BertForMaskedLM(lowerCamelCase__ )
    hf_bort_model.eval()

    # Parameter mapping table (Gluonnlp to Transformers)
    # * denotes layer index
    #
    # | Gluon Parameter | Transformers Parameter
    # | -------------------------------------------------------------- | ----------------------
    # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
    # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
    # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
    # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
    # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
    # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
    # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
    # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
    # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
    # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
    # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`

    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(lowerCamelCase__ ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )

    # Check param shapes and map new HF param back
    def check_and_map_params(lowerCamelCase__ , lowerCamelCase__ ):
        lowerCAmelCase__ = hf_param.shape

        lowerCAmelCase__ = to_torch(params[gluon_param] )
        lowerCAmelCase__ = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""

        return gluon_param
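    # Each check_and_map_params call below verifies that the tensor shapes agree, then copies
    # the Gluon parameter into the matching Hugging Face parameter: the embeddings first,
    # followed by the query/key/value, attention-output, intermediate and output blocks of
    # every encoder layer.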
    lowerCAmelCase__ = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight"""
    )
    lowerCAmelCase__ = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight"""
    )
    lowerCAmelCase__ = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta"""
    )
    lowerCAmelCase__ = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma"""
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    lowerCAmelCase__ = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers ):
        lowerCAmelCase__ = hf_bort_model.bert.encoder.layer[i]

        # self attention
        lowerCAmelCase__ = layer.attention.self

        lowerCAmelCase__ = check_and_map_params(
            self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias"""
        )
        lowerCAmelCase__ = check_and_map_params(
            self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight"""
        )
        lowerCAmelCase__ = check_and_map_params(
            self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias"""
        )
        lowerCAmelCase__ = check_and_map_params(
            self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight"""
        )
        lowerCAmelCase__ = check_and_map_params(
            self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias"""
        )
        lowerCAmelCase__ = check_and_map_params(
            self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight"""
        )

        # self attention output
        lowerCAmelCase__ = layer.attention.output

        lowerCAmelCase__ = check_and_map_params(
            self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias"""
        )
        lowerCAmelCase__ = check_and_map_params(
            self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight"""
        )
        lowerCAmelCase__ = check_and_map_params(
            self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta"""
        )
        lowerCAmelCase__ = check_and_map_params(
            self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma"""
        )

        # intermediate
        lowerCAmelCase__ = layer.intermediate

        lowerCAmelCase__ = check_and_map_params(
            intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias"""
        )
        lowerCAmelCase__ = check_and_map_params(
            intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight"""
        )

        # output
        lowerCAmelCase__ = layer.output

        lowerCAmelCase__ = check_and_map_params(
            bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias"""
        )
        lowerCAmelCase__ = check_and_map_params(
            bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight"""
        )
        lowerCAmelCase__ = check_and_map_params(
            bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta"""
        )
        lowerCAmelCase__ = check_and_map_params(
            bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma"""
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    lowerCAmelCase__ = RobertaTokenizer.from_pretrained("""roberta-base""" )

    lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ )["""input_ids"""]

    # Get gluon output
    lowerCAmelCase__ = mx.nd.array([input_ids] )
    lowerCAmelCase__ = original_bort(inputs=lowerCamelCase__ , token_types=[] )

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(lowerCamelCase__ )
    lowerCAmelCase__ = BertModel.from_pretrained(lowerCamelCase__ )
    hf_bort_model.eval()

    lowerCAmelCase__ = tokenizer.encode_plus(lowerCamelCase__ , return_tensors="""pt""" )
    lowerCAmelCase__ = hf_bort_model(**lowerCamelCase__ )[0]

    lowerCAmelCase__ = output_gluon[0].asnumpy()
    lowerCAmelCase__ = output_hf[0].detach().numpy()

    lowerCAmelCase__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    lowerCAmelCase__ = np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )

    if success:
        print("""✔️ Both model do output the same tensors""" )
    else:
        print("""❌ Both model do **NOT** output the same tensors""" )
        print("""Absolute difference is:""" , lowerCamelCase__ )


if __name__ == "__main__":
    __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    __lowerCAmelCase : str = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
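    # Example invocation (hypothetical paths, for illustration only):
    #   python convert_bort_checkpoint.py --bort_checkpoint_path ./bort/bort.params \
    #       --pytorch_dump_folder_path ./bort-pytorch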
674
0
"""simple docstring"""

import argparse

import torch
from safetensors.torch import load_file

from diffusers import StableDiffusionPipeline


def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    """simple docstring"""
    lowerCAmelCase__ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa )

    # load LoRA weight from .safetensors
    lowerCAmelCase__ = load_file(UpperCAmelCase__ )

    lowerCAmelCase__ = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            lowerCAmelCase__ = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" )
            lowerCAmelCase__ = pipeline.text_encoder
        else:
            lowerCAmelCase__ = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" )
            lowerCAmelCase__ = pipeline.unet

        # find the target layer
        lowerCAmelCase__ = layer_infos.pop(0 )
        while len(UpperCAmelCase__ ) > -1:
            try:
                lowerCAmelCase__ = curr_layer.__getattr__(UpperCAmelCase__ )
                if len(UpperCAmelCase__ ) > 0:
                    lowerCAmelCase__ = layer_infos.pop(0 )
                elif len(UpperCAmelCase__ ) == 0:
                    break
            except Exception:
                if len(UpperCAmelCase__ ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    lowerCAmelCase__ = layer_infos.pop(0 )

        lowerCAmelCase__ = []
        if "lora_down" in key:
            pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) )
            pair_keys.append(UpperCAmelCase__ )
        else:
            pair_keys.append(UpperCAmelCase__ )
            pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) )

        # update weight
        if len(state_dict[pair_keys[0]].shape ) == 4:
            lowerCAmelCase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            lowerCAmelCase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__ , UpperCAmelCase__ ).unsqueeze(2 ).unsqueeze(3 )
        else:
            lowerCAmelCase__ = state_dict[pair_keys[0]].to(torch.floataa )
            lowerCAmelCase__ = state_dict[pair_keys[1]].to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase__ , UpperCAmelCase__ )

        # update visited list
        for item in pair_keys:
            visited.append(UpperCAmelCase__ )

    return pipeline


if __name__ == "__main__":
    __lowerCAmelCase : List[Any] = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    __lowerCAmelCase : List[str] = parser.parse_args()

    __lowerCAmelCase : Tuple = args.base_model_path
    __lowerCAmelCase : Dict = args.checkpoint_path
    __lowerCAmelCase : Union[str, Any] = args.dump_path
    __lowerCAmelCase : Dict = args.lora_prefix_unet
    __lowerCAmelCase : Tuple = args.lora_prefix_text_encoder
    __lowerCAmelCase : List[str] = args.alpha

    __lowerCAmelCase : Dict = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)

    __lowerCAmelCase : str = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
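    # Example invocation (hypothetical paths; the model id is only illustrative):
    #   python convert_lora_safetensor_to_diffusers.py --base_model_path runwayml/stable-diffusion-v1-5 \
    #       --checkpoint_path ./lora.safetensors --dump_path ./merged-pipeline --alpha 0.75 --device cpu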
708
"""simple docstring"""

import copy
import os

import cva
import numpy as np
from matplotlib import pyplot as plt


# Cumulative-histogram contrast stretch: stretch() accumulates the normalised grey-level
# histogram into a CDF, builds a per-intensity lookup table from it, remaps every pixel
# through that table and writes the result to output_data/output.jpg.
class a_ :
    def __init__( self : Optional[int] ):
        lowerCAmelCase__ = """"""
        lowerCAmelCase__ = """"""
        lowerCAmelCase__ = []
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = 256
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = 0

    def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Union[str, Any] ):
        lowerCAmelCase__ = cva.imread(snake_case__ , 0 )
        lowerCAmelCase__ = copy.deepcopy(self.img )
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label="""x""" )
        lowerCAmelCase__ = np.sum(snake_case__ )
        for i in range(len(snake_case__ ) ):
            lowerCAmelCase__ = x[i] / self.k
            self.sk += prk
            lowerCAmelCase__ = (self.L - 1) * self.sk
            if self.rem != 0:
                lowerCAmelCase__ = int(last % last )
            lowerCAmelCase__ = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(snake_case__ )
            lowerCAmelCase__ = int(np.ma.count(self.img ) / self.img[1].size )
            lowerCAmelCase__ = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                lowerCAmelCase__ = self.img[j][i]
                if num != self.last_list[num]:
                    lowerCAmelCase__ = self.last_list[num]
        cva.imwrite("""output_data/output.jpg""" , self.img )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        cva.imshow("""Output-Image""" , self.img )
        cva.imshow("""Input-Image""" , self.original_image )
        cva.waitKey(5000 )
        cva.destroyAllWindows()


if __name__ == "__main__":
    __lowerCAmelCase : Dict = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    __lowerCAmelCase : Optional[int] = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
674
0
"""simple docstring"""

import operator as op


def _UpperCAmelCase ( lowerCamelCase__ ):
    """simple docstring"""
    lowerCAmelCase__ = []
    lowerCAmelCase__ = lambda lowerCamelCase__ , lowerCamelCase__ : int(x / y )  # noqa: E731 integer division operation
    lowerCAmelCase__ = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ )
    print("""-""" * (30 + len(_lowercase )) )

    for x in post_fix:
        if x.isdigit():  # if x in digit
            stack.append(_lowercase )  # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(_lowercase ) , sep=""" | """ )
        else:
            lowerCAmelCase__ = stack.pop()  # pop stack
            # output in tabular format
            print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(_lowercase ) , sep=""" | """ )

            lowerCAmelCase__ = stack.pop()  # pop stack
            # output in tabular format
            print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(_lowercase ) , sep=""" | """ )

            stack.append(
                str(opr[x](int(_lowercase ) , int(_lowercase ) ) )
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) ,
                ("""push(""" + a + x + b + """)""").ljust(12 ) ,
                """,""".join(_lowercase ) ,
                sep=""" | """ ,
            )

    return int(stack[0] )


# Worked example: for the postfix input "5 6 9 * +" the loop pops 9 and 6 for "*" (54),
# then pops 54 and 5 for "+", printing a table row for every push/pop and returning 59.
if __name__ == "__main__":
    __lowerCAmelCase : Optional[Any] = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
709
"""simple docstring"""

# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool

if is_vision_available():
    from PIL import Image


class a_ ( __UpperCamelCase ):
    UpperCamelCase_ : List[str] = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    UpperCamelCase_ : str = "CIDAS/clipseg-rd64-refined"
    UpperCamelCase_ : Any = "image_segmenter"
    UpperCamelCase_ : Optional[Any] = CLIPSegForImageSegmentation

    UpperCamelCase_ : List[str] = ["image", "text"]
    UpperCamelCase_ : int = ["image"]

    def __init__( self : Tuple , *snake_case__ : str , **snake_case__ : Optional[Any] ):
        requires_backends(self , ["""vision"""] )
        super().__init__(*snake_case__ , **snake_case__ )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , snake_case__ : "Image" , snake_case__ : str ):
        return self.pre_processor(text=[label] , images=[image] , padding=snake_case__ , return_tensors="""pt""" )

    def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : Tuple ):
        with torch.no_grad():
            lowerCAmelCase__ = self.model(**snake_case__ ).logits
        return logits

    def _SCREAMING_SNAKE_CASE ( self : Dict , snake_case__ : List[Any] ):
        lowerCAmelCase__ = outputs.cpu().detach().numpy()
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = 1
        return Image.fromarray((array * 255).astype(np.uinta ) )
674
0