Dataset schema (column, dtype, value range):
    code                      string   lengths 86 to 54.5k
    code_codestyle            int64    0 to 371
    style_context             string   lengths 87 to 49.2k
    style_context_codestyle   int64    0 to 349
    label                     int64    0 to 1
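The rows that follow give one value per column, in order: code, code_codestyle, style_context, style_context_codestyle, label. As a minimal, hypothetical sketch of how a dataset with this schema could be inspected using the datasets library (the repository id below is a placeholder, not the real dataset name):

# Hypothetical usage sketch; "your-org/code-style-pairs" is a placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("your-org/code-style-pairs", split="train")
row = ds[0]
print(row["code"][:200])               # Python source string, 86 to 54.5k chars
print(row["code_codestyle"])           # int64 style id in 0 to 371
print(row["style_context"][:200])      # companion source string, 87 to 49.2k chars
print(row["style_context_codestyle"])  # int64 style id in 0 to 349
print(row["label"])                    # binary label, 0 or 1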
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = JukeboxTokenizer lowerCamelCase = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCAmelCase ( self ) -> str: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCAmelCase ( self ) -> Any: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 
35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
code_codestyle: 341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['PerceiverFeatureExtractor'] __A = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 341
label: 1
"""simple docstring""" import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder __A = '__DUMMY_TRANSFORMERS_USER__' __A = 'Dummy User' __A = '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' __A = 'https://hub-ci.huggingface.co' __A = CI_HUB_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' __A = CI_HUB_ENDPOINT + '/{repo_id}/resolve/{revision}/{filename}' __A = Path('~/.huggingface/hub_ci_token').expanduser() @pytest.fixture def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCamelCase ) @pytest.fixture def _lowerCamelCase(__UpperCamelCase ) -> Dict: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCamelCase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCamelCase ) @pytest.fixture def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCamelCase ) @pytest.fixture def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: HfFolder.save_token(__UpperCamelCase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def _lowerCamelCase() -> Union[str, Any]: return HfApi(endpoint=__UpperCamelCase ) @pytest.fixture(scope="""session""" ) def _lowerCamelCase(__UpperCamelCase ) -> str: _lowerCAmelCase =HfFolder.get_token() HfFolder.save_token(__UpperCamelCase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(__UpperCamelCase ) @pytest.fixture def _lowerCamelCase(__UpperCamelCase ) -> List[str]: def _cleanup_repo(__UpperCamelCase ): hf_api.delete_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def _lowerCamelCase(__UpperCamelCase ) -> Dict: @contextmanager def _temporary_repo(__UpperCamelCase ): try: yield repo_id finally: cleanup_repo(__UpperCamelCase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =F'''repo_txt_data-{int(time.time() * 1_0E3 )}''' _lowerCAmelCase =F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" , private=__UpperCamelCase ) hf_api.upload_file( token=__UpperCamelCase , path_or_fileobj=str(__UpperCamelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCamelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =F'''repo_zipped_txt_data-{int(time.time() * 1_0E3 )}''' _lowerCAmelCase =F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" , private=__UpperCamelCase ) hf_api.upload_file( token=__UpperCamelCase , path_or_fileobj=str(__UpperCamelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCamelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCamelCase , token=__UpperCamelCase , 
repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =F'''repo_zipped_img_data-{int(time.time() * 1_0E3 )}''' _lowerCAmelCase =F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" , private=__UpperCamelCase ) hf_api.upload_file( token=__UpperCamelCase , path_or_fileobj=str(__UpperCamelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCamelCase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(__UpperCamelCase , token=__UpperCamelCase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: return hf_private_dataset_repo_zipped_img_data_
code_codestyle: 341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 341
label: 1
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def __init__( self ) -> Optional[Any]: _lowerCAmelCase =[] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: self.events.append("""on_init_end""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Any: self.events.append("""on_train_begin""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Any: self.events.append("""on_train_end""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> str: self.events.append("""on_epoch_begin""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> int: self.events.append("""on_epoch_end""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Dict: self.events.append("""on_step_begin""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Dict: self.events.append("""on_step_end""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: self.events.append("""on_evaluate""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: self.events.append("""on_predict""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: self.events.append("""on_save""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> Any: self.events.append("""on_log""" ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: self.events.append("""on_prediction_step""" ) @require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =tempfile.mkdtemp() def _lowerCAmelCase ( self ) -> Any: shutil.rmtree(self.output_dir ) def _lowerCAmelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=64 , __UpperCAmelCase=64 , __UpperCAmelCase=None , __UpperCAmelCase=False , **__UpperCAmelCase ) -> List[Any]: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
_lowerCAmelCase =RegressionDataset(length=__UpperCAmelCase ) _lowerCAmelCase =RegressionDataset(length=__UpperCAmelCase ) _lowerCAmelCase =RegressionModelConfig(a=__UpperCAmelCase , b=__UpperCAmelCase ) _lowerCAmelCase =RegressionPreTrainedModel(__UpperCAmelCase ) _lowerCAmelCase =TrainingArguments(self.output_dir , disable_tqdm=__UpperCAmelCase , report_to=[] , **__UpperCAmelCase ) return Trainer( __UpperCAmelCase , __UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , callbacks=__UpperCAmelCase , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) # Order doesn't matter _lowerCAmelCase =sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : cb.__name__ if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cb.__class__.__name__ ) _lowerCAmelCase =sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : cb.__name__ if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cb.__class__.__name__ ) for cba, cba in zip(__UpperCAmelCase , __UpperCAmelCase ): if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(__UpperCAmelCase , cba.__class__ ) elif not isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(cba.__class__ , __UpperCAmelCase ) else: self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase =["""on_init_end""", """on_train_begin"""] _lowerCAmelCase =0 _lowerCAmelCase =len(trainer.get_eval_dataloader() ) _lowerCAmelCase =["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs ): expected_events.append("""on_epoch_begin""" ) for _ in range(__UpperCAmelCase ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""" ) expected_events.append("""on_epoch_end""" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =self.get_trainer() _lowerCAmelCase =DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) # Callbacks passed at init are added to the default callbacks _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _lowerCAmelCase =self.get_trainer(disable_tqdm=__UpperCAmelCase ) _lowerCAmelCase =DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase 
=DEFAULT_CALLBACKS.copy() + [ProgressCallback] _lowerCAmelCase =self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(__UpperCAmelCase ) expected_callbacks.remove(__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) _lowerCAmelCase =self.get_trainer() _lowerCAmelCase =trainer.pop_callback(__UpperCAmelCase ) self.assertEqual(cb.__class__ , __UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) trainer.add_callback(__UpperCAmelCase ) expected_callbacks.insert(0 , __UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) # We can also add, pop, or remove by instance _lowerCAmelCase =self.get_trainer() _lowerCAmelCase =trainer.callback_handler.callbacks[0] trainer.remove_callback(__UpperCAmelCase ) expected_callbacks.remove(__UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) _lowerCAmelCase =self.get_trainer() _lowerCAmelCase =trainer.callback_handler.callbacks[0] _lowerCAmelCase =trainer.pop_callback(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) trainer.add_callback(__UpperCAmelCase ) expected_callbacks.insert(0 , __UpperCAmelCase ) self.check_callbacks_equality(trainer.callback_handler.callbacks , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=__UpperCAmelCase ) _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) # Independent log/save/eval _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) _lowerCAmelCase =self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) # A bit of everything _lowerCAmelCase =self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) trainer.train() _lowerCAmelCase =trainer.callback_handler.callbacks[-2].events self.assertEqual(__UpperCAmelCase , self.get_expected_events(__UpperCAmelCase ) ) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock: _lowerCAmelCase =self.get_trainer( 
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(__UpperCAmelCase ) in warn_mock.call_args[0][0]
code_codestyle: 341
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
style_context_codestyle: 341
label: 1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = KandinskyVaaInpaintPipeline lowerCamelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] lowerCamelCase = [ '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] lowerCamelCase = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowerCamelCase = False @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return 32 @property def _lowerCAmelCase ( self ) -> Optional[int]: return 32 @property def _lowerCAmelCase ( self ) -> List[Any]: return self.time_input_dim @property def _lowerCAmelCase ( self ) -> Tuple: return self.time_input_dim * 4 @property def _lowerCAmelCase ( self ) -> int: return 1_00 @property def _lowerCAmelCase ( self ) -> Any: torch.manual_seed(0 ) _lowerCAmelCase ={ """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _lowerCAmelCase =UNetaDConditionModel(**__UpperCAmelCase ) return model @property def _lowerCAmelCase ( self ) -> Optional[int]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) _lowerCAmelCase =VQModel(**self.dummy_movq_kwargs ) return model def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.dummy_unet _lowerCAmelCase =self.dummy_movq _lowerCAmelCase =DDIMScheduler( num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__UpperCAmelCase , ) _lowerCAmelCase ={ """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Optional[Any]: _lowerCAmelCase 
=floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _lowerCAmelCase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __UpperCAmelCase ) # create init_image _lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((2_56, 2_56) ) # create mask _lowerCAmelCase =np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase =0 if str(__UpperCAmelCase ).startswith("""mps""" ): _lowerCAmelCase =torch.manual_seed(__UpperCAmelCase ) else: _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _lowerCAmelCase ={ """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" _lowerCAmelCase =self.get_dummy_components() _lowerCAmelCase =self.pipeline_class(**__UpperCAmelCase ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase =pipe(**self.get_dummy_inputs(__UpperCAmelCase ) ) _lowerCAmelCase =output.images _lowerCAmelCase =pipe( **self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] print(f'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase =np.array( [0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def _lowerCAmelCase ( self ) -> str: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" ) _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _lowerCAmelCase =np.ones((7_68, 7_68) , dtype=np.floataa ) _lowerCAmelCase =0 _lowerCAmelCase ="""a hat""" _lowerCAmelCase =KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__UpperCAmelCase ) _lowerCAmelCase =KandinskyVaaInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa ) _lowerCAmelCase =pipeline.to(__UpperCAmelCase ) pipeline.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase 
=torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase , _lowerCAmelCase =pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _lowerCAmelCase =pipeline( image=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
code_codestyle: 341
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
style_context_codestyle: 341
label: 1
"""simple docstring""" from ...processing_utils import ProcessorMixin class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''SpeechT5FeatureExtractor''' lowerCamelCase = '''SpeechT5Tokenizer''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =kwargs.pop("""audio""" , __UpperCAmelCase ) _lowerCAmelCase =kwargs.pop("""text""" , __UpperCAmelCase ) _lowerCAmelCase =kwargs.pop("""text_target""" , __UpperCAmelCase ) _lowerCAmelCase =kwargs.pop("""audio_target""" , __UpperCAmelCase ) _lowerCAmelCase =kwargs.pop("""sampling_rate""" , __UpperCAmelCase ) if audio is not None and text is not None: raise ValueError( """Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" ) if audio_target is not None and text_target is not None: raise ValueError( """Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( """You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" ) if audio is not None: _lowerCAmelCase =self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase ) elif text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase ) else: _lowerCAmelCase =None if audio_target is not None: _lowerCAmelCase =self.feature_extractor(audio_target=__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase ) _lowerCAmelCase =targets["""input_values"""] elif text_target is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase ) _lowerCAmelCase =targets["""input_ids"""] else: _lowerCAmelCase =None if inputs is None: return targets if targets is not None: _lowerCAmelCase =labels _lowerCAmelCase =targets.get("""attention_mask""" ) if decoder_attention_mask is not None: _lowerCAmelCase =decoder_attention_mask return inputs def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: _lowerCAmelCase =kwargs.pop("""input_values""" , __UpperCAmelCase ) _lowerCAmelCase =kwargs.pop("""input_ids""" , __UpperCAmelCase ) _lowerCAmelCase =kwargs.pop("""labels""" , __UpperCAmelCase ) if input_values is not None and input_ids is not None: raise ValueError("""Cannot process both `input_values` and `input_ids` inputs.""" ) if input_values is None and input_ids is None and labels is None: raise ValueError( """You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.""" ) if input_values is not None: _lowerCAmelCase =self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) elif input_ids is not None: _lowerCAmelCase =self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase ) else: _lowerCAmelCase =None if labels is not None: if "input_ids" in labels or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and "input_ids" in labels[0]): _lowerCAmelCase =self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase ) _lowerCAmelCase =targets["""input_ids"""] else: _lowerCAmelCase =self.feature_extractor.feature_size _lowerCAmelCase =self.feature_extractor.num_mel_bins _lowerCAmelCase =self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) _lowerCAmelCase 
=feature_size_hack _lowerCAmelCase =targets["""input_values"""] else: _lowerCAmelCase =None if inputs is None: return targets if targets is not None: _lowerCAmelCase =labels _lowerCAmelCase =targets.get("""attention_mask""" ) if decoder_attention_mask is not None: _lowerCAmelCase =decoder_attention_mask return inputs def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
code_codestyle: 341
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True lowerCamelCase = None lowerCamelCase = 1 lowerCamelCase = None lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
style_context_codestyle: 341
label: 1
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: if height >= 1: move_tower(height - 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) move_disk(__UpperCamelCase , __UpperCamelCase ) move_tower(height - 1 , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str: print("""moving disk from""" , __UpperCamelCase , """to""" , __UpperCamelCase ) def _lowerCamelCase() -> Optional[Any]: _lowerCAmelCase =int(input("""Height of hanoi: """ ).strip() ) move_tower(__UpperCamelCase , """A""" , """B""" , """C""" ) if __name__ == "__main__": main()
code_codestyle: 341
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowerCamelCase() -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
style_context_codestyle: 341
label: 1
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=7 ) -> Optional[int]: _lowerCAmelCase =None if token is not None: _lowerCAmelCase ={"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''} # The id of a workflow (not of a workflow run) _lowerCAmelCase ="""636036""" _lowerCAmelCase =F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' _lowerCAmelCase =requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json() return result["workflow_runs"] def _lowerCamelCase(__UpperCamelCase ) -> Any: _lowerCAmelCase =get_daily_ci_runs(__UpperCamelCase ) _lowerCAmelCase =None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": _lowerCAmelCase =workflow_run["""id"""] break return workflow_run_id def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str: _lowerCAmelCase =get_last_daily_ci_runs(__UpperCamelCase ) if workflow_run_id is not None: _lowerCAmelCase =get_artifacts_links(worflow_run_id=__UpperCamelCase , token=__UpperCamelCase ) for artifact_name in artifact_names: if artifact_name in artifacts_links: _lowerCAmelCase =artifacts_links[artifact_name] download_artifact( artifact_name=__UpperCamelCase , artifact_url=__UpperCamelCase , output_dir=__UpperCamelCase , token=__UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]: get_last_daily_ci_artifacts(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} for artifact_name in artifact_names: _lowerCAmelCase =os.path.join(__UpperCamelCase , F'''{artifact_name}.zip''' ) if os.path.isfile(__UpperCamelCase ): _lowerCAmelCase ={} with zipfile.ZipFile(__UpperCamelCase ) as z: for filename in z.namelist(): if not os.path.isdir(__UpperCamelCase ): # read the file with z.open(__UpperCamelCase ) as f: _lowerCAmelCase =f.read().decode("""UTF-8""" ) return results
341
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
341
1
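The code cell of this row downloads CI artifacts and then reads every text file out of each downloaded zip. A sketch of just that zip-reading step with readable names (the path below is a placeholder):

import os
import zipfile


def read_zip_texts(zip_path: str) -> dict:
    """Return {member_name: decoded text} for every regular file in the archive."""
    results = {}
    with zipfile.ZipFile(zip_path) as z:
        for name in z.namelist():
            if name.endswith("/"):  # skip directory entries
                continue
            with z.open(name) as f:
                results[name] = f.read().decode("utf-8")
    return results


if __name__ == "__main__":
    path = "artifacts/test_results.zip"  # placeholder path
    if os.path.isfile(path):
        print(sorted(read_zip_texts(path)))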
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''llama''' lowerCamelCase = ['''past_key_values'''] def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =intermediate_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads # for backward compatibility if num_key_value_heads is None: _lowerCAmelCase =num_attention_heads _lowerCAmelCase =num_key_value_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =rms_norm_eps _lowerCAmelCase =pretraining_tp _lowerCAmelCase =use_cache _lowerCAmelCase =rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> str: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
341
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
341
1
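The configuration class in this row validates its rope_scaling argument: it must be a two-key dict whose type is either linear or dynamic and whose factor is a float greater than 1. A standalone sketch of that check (the function name is illustrative):

def validate_rope_scaling(rope_scaling):
    """Raise ValueError unless rope_scaling looks like {"type": "linear"|"dynamic", "factor": > 1.0}."""
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dictionary with two fields, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    scaling_factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(scaling_factor, float) or scaling_factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor must be a float > 1, got {scaling_factor}")


validate_rope_scaling(None)                               # no scaling configured
validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes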
"""simple docstring""" import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCAmelCase , """tf_padding""" ) ) self.parent.assertTrue(hasattr(__UpperCAmelCase , """depth_multiplier""" ) ) class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=0.2_5 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=10_24 , __UpperCAmelCase=32 , __UpperCAmelCase="relu6" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =num_channels _lowerCAmelCase =image_size _lowerCAmelCase =depth_multiplier _lowerCAmelCase =min_depth _lowerCAmelCase =tf_padding _lowerCAmelCase =int(last_hidden_size * depth_multiplier ) _lowerCAmelCase =output_stride _lowerCAmelCase =hidden_act _lowerCAmelCase =classifier_dropout_prob _lowerCAmelCase =use_labels _lowerCAmelCase =is_training _lowerCAmelCase =num_labels _lowerCAmelCase =initializer_range _lowerCAmelCase =scope def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase =None _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase =self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCAmelCase ( self ) -> List[str]: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: _lowerCAmelCase =MobileNetVaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: _lowerCAmelCase =self.num_labels _lowerCAmelCase =MobileNetVaForImageClassification(__UpperCAmelCase ) 
model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () lowerCamelCase = ( {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification} if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =MobileNetVaModelTester(self ) _lowerCAmelCase =MobileNetVaConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" ) def _lowerCAmelCase ( self ) -> Tuple: pass @unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" ) def _lowerCAmelCase ( self ) -> str: pass @unittest.skip(reason="""MobileNetV1 does not output attentions""" ) def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase =[*signature.parameters.keys()] _lowerCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) _lowerCAmelCase =outputs.hidden_states _lowerCAmelCase =26 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase =True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =MobileNetVaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def 
_lowerCamelCase() -> Union[str, Any]: _lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ) -> Optional[Any]: return ( MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None ) @slow def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(__UpperCAmelCase ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) # verify the logits _lowerCAmelCase =torch.Size((1, 10_01) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) _lowerCAmelCase =torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
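The import file used as style context in this row registers submodule contents in an _import_structure dict and defers the real imports until they are needed. Outside of the library's _LazyModule helper, the same deferral can be sketched with a module-level __getattr__ (PEP 562); this is a simplified stand-in, not the library's implementation:

# lazy_pkg/__init__.py -- simplified stand-in for the _import_structure / lazy-module pattern
import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}

# reverse map: public name -> submodule that defines it
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name):  # PEP 562: called only when `name` is not found in the module
    if name in _name_to_module:
        module = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")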
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_informer': [ 'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'InformerForPrediction', 'InformerModel', 'InformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json', 'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json', 'uclanlp/visualbert-vqa-coco-pre': ( 'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json' ), 'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json', 'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json', 'uclanlp/visualbert-vcr-coco-pre': ( 'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json' ), 'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json', 'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json', 'uclanlp/visualbert-nlvr2-coco-pre': ( 'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json' ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''visual_bert''' def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=5_12 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) -> int: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =visual_embedding_dim _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_act _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =initializer_range _lowerCAmelCase =type_vocab_size _lowerCAmelCase =layer_norm_eps _lowerCAmelCase =bypass_transformer _lowerCAmelCase =special_visual_initialize
341
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
341
1
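The coreference evaluation code in this row reports MUC, B-cubed, CEAFe, LEA and mention scores, but averages only the first three F1 values into the CoNLL score on a 0-100 scale. The arithmetic, with illustrative F1 values:

def conll_score(muc_f1: float, bcub_f1: float, ceafe_f1: float) -> float:
    # averaged CoNLL score: mean of the three F1 values, reported on a 0-100 scale
    return (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100


print(conll_score(0.80, 0.70, 0.60))  # 70.0 (illustrative F1 values)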
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = KandinskyInpaintPipeline lowerCamelCase = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image'''] lowerCamelCase = [ '''prompt''', '''negative_prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''', ] lowerCamelCase = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''negative_prompt''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] lowerCamelCase = False @property def _lowerCAmelCase ( self ) -> Optional[int]: return 32 @property def _lowerCAmelCase ( self ) -> List[str]: return 32 @property def _lowerCAmelCase ( self ) -> Optional[int]: return self.time_input_dim @property def _lowerCAmelCase ( self ) -> List[str]: return self.time_input_dim * 4 @property def _lowerCAmelCase ( self ) -> int: return 1_00 @property def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def _lowerCAmelCase ( self ) -> List[Any]: torch.manual_seed(0 ) _lowerCAmelCase =MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , ) _lowerCAmelCase =MultilingualCLIP(__UpperCAmelCase ) _lowerCAmelCase =text_encoder.eval() return text_encoder @property def _lowerCAmelCase ( self ) -> Union[str, Any]: torch.manual_seed(0 ) _lowerCAmelCase ={ """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _lowerCAmelCase =UNetaDConditionModel(**__UpperCAmelCase ) return model @property def _lowerCAmelCase ( self ) -> Optional[Any]: return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, 
"up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _lowerCAmelCase ( self ) -> int: torch.manual_seed(0 ) _lowerCAmelCase =VQModel(**self.dummy_movq_kwargs ) return model def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =self.dummy_tokenizer _lowerCAmelCase =self.dummy_unet _lowerCAmelCase =self.dummy_movq _lowerCAmelCase =DDIMScheduler( num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__UpperCAmelCase , ) _lowerCAmelCase ={ """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Any: _lowerCAmelCase =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _lowerCAmelCase =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__UpperCAmelCase ) # create init_image _lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) _lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((2_56, 2_56) ) # create mask _lowerCAmelCase =np.ones((64, 64) , dtype=np.floataa ) _lowerCAmelCase =0 if str(__UpperCAmelCase ).startswith("""mps""" ): _lowerCAmelCase =torch.manual_seed(__UpperCAmelCase ) else: _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _lowerCAmelCase ={ """prompt""": """horse""", """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""cpu""" _lowerCAmelCase =self.get_dummy_components() _lowerCAmelCase =self.pipeline_class(**__UpperCAmelCase ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase =pipe(**self.get_dummy_inputs(__UpperCAmelCase ) ) _lowerCAmelCase =output.images _lowerCAmelCase =pipe( **self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] print(f'''image.shape {image.shape}''' ) assert image.shape == (1, 64, 64, 3) _lowerCAmelCase =np.array( [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test 
super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" ) _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _lowerCAmelCase =np.ones((7_68, 7_68) , dtype=np.floataa ) _lowerCAmelCase =0 _lowerCAmelCase ="""a hat""" _lowerCAmelCase =KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__UpperCAmelCase ) _lowerCAmelCase =KandinskyInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa ) _lowerCAmelCase =pipeline.to(__UpperCAmelCase ) pipeline.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase , _lowerCAmelCase =pipe_prior( __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _lowerCAmelCase =pipeline( __UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
341
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
341
1
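The inpainting pipeline test in this row prepares its inputs by converting a float image tensor to a PIL image and pairing it with a float mask that zeroes out part of the frame. A small sketch of those two conversions (the shapes, the 255 scaling step and the zeroed pixel are arbitrary here, and the keep/repaint convention of the mask depends on the pipeline):

import numpy as np
from PIL import Image

# fake image in CHW layout with values in [0, 1], as a pipeline test would produce
chw = np.random.rand(3, 64, 64).astype(np.float32)
hwc = np.transpose(chw, (1, 2, 0))                                   # channels-last for PIL
init_image = Image.fromarray(np.uint8(hwc * 255)).convert("RGB").resize((256, 256))

# float mask with a single zeroed pixel
mask = np.ones((64, 64), dtype=np.float32)
mask[0, 0] = 0.0

print(init_image.size, mask.sum())  # (256, 256) 4095.0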
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''pixel_values'''] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 2_55 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> None: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =size if size is not None else {"""height""": 3_84, """width""": 3_84} _lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _lowerCAmelCase =do_resize _lowerCAmelCase =size _lowerCAmelCase =resample _lowerCAmelCase =do_rescale _lowerCAmelCase =rescale_factor _lowerCAmelCase =do_normalize _lowerCAmelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN _lowerCAmelCase =image_std if image_std is not None else OPENAI_CLIP_STD _lowerCAmelCase =do_convert_rgb def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}''' ) _lowerCAmelCase =(size["""height"""], size["""width"""]) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> List[str]: return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: _lowerCAmelCase =do_resize if do_resize is not None else self.do_resize _lowerCAmelCase =resample if resample is not None else self.resample _lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase =image_mean if image_mean is not None else self.image_mean _lowerCAmelCase =image_std if image_std is not None else self.image_std _lowerCAmelCase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _lowerCAmelCase =size if size is not None else self.size _lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _lowerCAmelCase =make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: _lowerCAmelCase =[convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. _lowerCAmelCase =[to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: _lowerCAmelCase =[self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_rescale: _lowerCAmelCase =[self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: _lowerCAmelCase =[self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] _lowerCAmelCase =[to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] _lowerCAmelCase =BatchFeature(data={"""pixel_values""": images} , tensor_type=__UpperCAmelCase ) return encoded_outputs
341
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model'} __A = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __A = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __A = 0 __A = 1 __A = 2 __A = 3 __A = 4 class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = '''left''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =3 _lowerCAmelCase =do_lower_case _lowerCAmelCase =remove_space _lowerCAmelCase =keep_accents _lowerCAmelCase =vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> str: return len(self.sp_model ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None return state def __setstate__( self , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: if self.remove_space: _lowerCAmelCase =""" """.join(inputs.strip().split() ) else: _lowerCAmelCase =inputs _lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase ) _lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase =outputs.lower() return outputs def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: _lowerCAmelCase 
=self.preprocess_text(__UpperCAmelCase ) _lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) _lowerCAmelCase =[] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase =cur_pieces[1:] else: _lowerCAmelCase =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: return self.sp_model.PieceToId(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.IdToPiece(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str: _lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase ) _lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _lowerCAmelCase =[] _lowerCAmelCase =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) _lowerCAmelCase =[] sub_texts.append(__UpperCAmelCase ) else: current_sub_text.append(__UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _lowerCAmelCase ="""""".join(__UpperCAmelCase ) _lowerCAmelCase =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase ) return clean_text else: return text def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] return ([0] * len(__UpperCAmelCase )) + [1, 1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = 
None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
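As a quick illustration of the special-token layout produced by the `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` methods above (XLNet appends `<sep>` and `<cls>` at the end, with the `<cls>` position in segment 2), here is a self-contained sketch; the token ids are made up and are not the real vocabulary ids.

# Self-contained sketch with placeholder ids (4 and 3 are not the real <sep>/<cls> ids).
SEP_ID, CLS_ID = 4, 3


def build_inputs(ids_a, ids_b=None):
    sep, cls = [SEP_ID], [CLS_ID]
    if ids_b is None:
        return ids_a + sep + cls
    return ids_a + sep + ids_b + sep + cls


def token_type_ids(ids_a, ids_b=None):
    sep, cls_segment_id = [SEP_ID], [2]
    if ids_b is None:
        return len(ids_a + sep) * [0] + cls_segment_id
    return len(ids_a + sep) * [0] + len(ids_b + sep) * [1] + cls_segment_id


print(build_inputs([10, 11], [20]))    # [10, 11, 4, 20, 4, 3]
print(token_type_ids([10, 11], [20]))  # [0, 0, 0, 1, 1, 2]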
341
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''vit_msn''' def __init__( self , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-06 , __UpperCAmelCase=2_24 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , **__UpperCAmelCase , ) -> Optional[int]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_act _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps _lowerCAmelCase =image_size _lowerCAmelCase =patch_size _lowerCAmelCase =num_channels _lowerCAmelCase =qkv_bias
341
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
341
1
"""simple docstring""" from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =(1 - _cos) / 2 _lowerCAmelCase =1 - _cos _lowerCAmelCase =1 + alpha _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 - alpha _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =(1 + _cos) / 2 _lowerCAmelCase =-1 - _cos _lowerCAmelCase =1 + alpha _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 - alpha _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =_sin / 2 _lowerCAmelCase =0 _lowerCAmelCase =-ba _lowerCAmelCase =1 + alpha _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 - alpha _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =1 - alpha _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 + alpha _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] ) return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =10 ** (gain_db / 40) _lowerCAmelCase =1 + alpha * big_a _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 - alpha * big_a _lowerCAmelCase =1 + alpha / big_a _lowerCAmelCase =-2 * _cos _lowerCAmelCase =1 - alpha / big_a _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =10 ** (gain_db / 40) _lowerCAmelCase =(big_a + 1) - (big_a - 1) * _cos _lowerCAmelCase =(big_a + 1) + (big_a - 1) * _cos _lowerCAmelCase =(big_a - 1) - (big_a + 1) * _cos _lowerCAmelCase =(big_a - 1) + (big_a + 1) * _cos _lowerCAmelCase =2 * sqrt(__UpperCamelCase ) * alpha _lowerCAmelCase =big_a * (pmc + aaa) _lowerCAmelCase =2 * big_a * mpc _lowerCAmelCase =big_a * (pmc - aaa) _lowerCAmelCase =ppmc + aaa _lowerCAmelCase =-2 * pmpc _lowerCAmelCase =ppmc - aaa _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) 
return filt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 / sqrt(2 ) , ) -> IIRFilter: _lowerCAmelCase =tau * frequency / samplerate _lowerCAmelCase =sin(__UpperCamelCase ) _lowerCAmelCase =cos(__UpperCamelCase ) _lowerCAmelCase =_sin / (2 * q_factor) _lowerCAmelCase =10 ** (gain_db / 40) _lowerCAmelCase =(big_a + 1) - (big_a - 1) * _cos _lowerCAmelCase =(big_a + 1) + (big_a - 1) * _cos _lowerCAmelCase =(big_a - 1) - (big_a + 1) * _cos _lowerCAmelCase =(big_a - 1) + (big_a + 1) * _cos _lowerCAmelCase =2 * sqrt(__UpperCamelCase ) * alpha _lowerCAmelCase =big_a * (ppmc + aaa) _lowerCAmelCase =-2 * big_a * pmpc _lowerCAmelCase =big_a * (ppmc - aaa) _lowerCAmelCase =pmc + aaa _lowerCAmelCase =2 * mpc _lowerCAmelCase =pmc - aaa _lowerCAmelCase =IIRFilter(2 ) filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] ) return filt
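As a sanity check, here is a self-contained recomputation of the low-pass biquad coefficients from the first filter function above, using the same formulas but without the IIRFilter dependency; the 1 kHz cutoff and 48 kHz sample rate are arbitrary example values.

# Recomputes the low-pass coefficients [a0, a1, a2], [b0, b1, b2] built above.
from math import cos, sin, sqrt, tau


def lowpass_coefficients(frequency: float, samplerate: int, q_factor: float = 1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    _sin, _cos = sin(w0), cos(w0)
    alpha = _sin / (2 * q_factor)
    b1 = 1 - _cos
    b0 = b2 = b1 / 2
    a0, a1, a2 = 1 + alpha, -2 * _cos, 1 - alpha
    return [a0, a1, a2], [b0, b1, b2]


print(lowpass_coefficients(1000, 48000))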
341
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''llama''' lowerCamelCase = ['''past_key_values'''] def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =intermediate_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads # for backward compatibility if num_key_value_heads is None: _lowerCAmelCase =num_attention_heads _lowerCAmelCase =num_key_value_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =rms_norm_eps _lowerCAmelCase =pretraining_tp _lowerCAmelCase =use_cache _lowerCAmelCase =rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> str: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
341
1
"""simple docstring""" from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar __A = TypeVar('T') def _lowerCamelCase(__UpperCamelCase ) -> int: return (position - 1) // 2 def _lowerCamelCase(__UpperCamelCase ) -> int: return (2 * position) + 1 def _lowerCamelCase(__UpperCamelCase ) -> int: return (2 * position) + 2 class lowerCamelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self ) -> None: _lowerCAmelCase =[] _lowerCAmelCase ={} _lowerCAmelCase =0 def __len__( self ) -> int: return self.elements def __repr__( self ) -> str: return str(self.heap ) def _lowerCAmelCase ( self ) -> bool: # Check if the priority queue is empty return self.elements == 0 def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: # Add an element with given priority to the queue self.heap.append((elem, weight) ) _lowerCAmelCase =self.elements self.elements += 1 self._bubble_up(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> T: # Remove and return the element with lowest weight (highest priority) if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) _lowerCAmelCase , _lowerCAmelCase =self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: _lowerCAmelCase , _lowerCAmelCase =self.heap[0] self._bubble_down(__UpperCAmelCase ) return elem def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: # Update the weight of the given key _lowerCAmelCase =self.position_map[elem] _lowerCAmelCase =(elem, weight) if position > 0: _lowerCAmelCase =get_parent_position(__UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase =self.heap[parent_position] if parent_weight > weight: self._bubble_up(__UpperCAmelCase ) else: self._bubble_down(__UpperCAmelCase ) else: self._bubble_down(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None: # Place a node at the proper position (upward movement) [to be used internally # only] _lowerCAmelCase =self.position_map[elem] if curr_pos == 0: return None _lowerCAmelCase =get_parent_position(__UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase =self.heap[curr_pos] _lowerCAmelCase , _lowerCAmelCase =self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__UpperCAmelCase , __UpperCAmelCase ) return self._bubble_up(__UpperCAmelCase ) return None def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None: # Place a node at the proper position (downward movement) [to be used # internally only] _lowerCAmelCase =self.position_map[elem] _lowerCAmelCase , _lowerCAmelCase =self.heap[curr_pos] _lowerCAmelCase =get_child_left_position(__UpperCAmelCase ) _lowerCAmelCase =get_child_right_position(__UpperCAmelCase ) if child_left_position < self.elements and child_right_position < self.elements: _lowerCAmelCase , _lowerCAmelCase =self.heap[child_left_position] _lowerCAmelCase , _lowerCAmelCase =self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__UpperCAmelCase , __UpperCAmelCase ) return self._bubble_down(__UpperCAmelCase ) if child_left_position < self.elements: _lowerCAmelCase , _lowerCAmelCase =self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(__UpperCAmelCase , __UpperCAmelCase ) return self._bubble_down(__UpperCAmelCase ) else: return None if child_right_position < self.elements: _lowerCAmelCase , _lowerCAmelCase =self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__UpperCAmelCase , __UpperCAmelCase ) return 
self._bubble_down(__UpperCAmelCase ) return None def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: # Swap the nodes at the given positions _lowerCAmelCase =self.heap[nodea_pos][0] _lowerCAmelCase =self.heap[nodea_pos][0] _lowerCAmelCase , _lowerCAmelCase =( self.heap[nodea_pos], self.heap[nodea_pos], ) _lowerCAmelCase =nodea_pos _lowerCAmelCase =nodea_pos class lowerCamelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self ) -> None: _lowerCAmelCase ={} _lowerCAmelCase =0 def __repr__( self ) -> str: return str(self.connections ) def __len__( self ) -> int: return self.nodes def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None: # Add a node in the graph if it is not in the graph if node not in self.connections: _lowerCAmelCase ={} self.nodes += 1 def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None: # Add an edge between 2 nodes in the graph self.add_node(__UpperCAmelCase ) self.add_node(__UpperCAmelCase ) _lowerCAmelCase =weight _lowerCAmelCase =weight def _lowerCamelCase(__UpperCamelCase , ) -> tuple[dict[T, int], dict[T, T | None]]: _lowerCAmelCase ={node: maxsize for node in graph.connections} _lowerCAmelCase ={node: None for node in graph.connections} _lowerCAmelCase =MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(__UpperCamelCase , __UpperCamelCase ) if priority_queue.is_empty(): return dist, parent # initialization _lowerCAmelCase =priority_queue.extract_min() _lowerCAmelCase =0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _lowerCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__UpperCamelCase , dist[neighbour] ) _lowerCAmelCase =node # running prim's algorithm while not priority_queue.is_empty(): _lowerCAmelCase =priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: _lowerCAmelCase =dist[node] + graph.connections[node][neighbour] priority_queue.update_key(__UpperCamelCase , dist[neighbour] ) _lowerCAmelCase =node return dist, parent
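For intuition about what the priority-queue-driven routine above computes, here is a tiny self-contained Prim's algorithm on an adjacency-dict graph, using heapq in place of the custom min-priority queue; the sample graph and its minimum spanning tree weight of 6 are made-up illustration data.

# Compact Prim's algorithm on an undirected, weighted adjacency dict.
import heapq


def prim_mst_weight(graph: dict, start) -> int:
    visited = {start}
    # Heap of (edge weight, destination node) for edges leaving the visited set.
    edges = [(w, v) for v, w in graph[start].items()]
    heapq.heapify(edges)
    total = 0
    while edges and len(visited) < len(graph):
        weight, node = heapq.heappop(edges)
        if node in visited:
            continue
        visited.add(node)
        total += weight
        for neighbour, w in graph[node].items():
            if neighbour not in visited:
                heapq.heappush(edges, (w, neighbour))
    return total


sample = {
    "a": {"b": 1, "c": 4},
    "b": {"a": 1, "c": 2, "d": 6},
    "c": {"a": 4, "b": 2, "d": 3},
    "d": {"b": 6, "c": 3},
}
print(prim_mst_weight(sample, "a"))  # 6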
341
"""simple docstring""" import warnings from .generation import TFGenerationMixin class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' # warning at import time warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
341
1
"""simple docstring""" import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL __A = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11') def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , ) -> Any: output_path.parent.mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , use_external_data_format=__UpperCamelCase , enable_onnx_checker=__UpperCamelCase , opset_version=__UpperCamelCase , ) else: export( __UpperCamelCase , __UpperCamelCase , f=output_path.as_posix() , input_names=__UpperCamelCase , output_names=__UpperCamelCase , dynamic_axes=__UpperCamelCase , do_constant_folding=__UpperCamelCase , opset_version=__UpperCamelCase , ) @torch.no_grad() def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ) -> List[Any]: _lowerCAmelCase =torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _lowerCAmelCase ="""cuda""" elif fpaa and not torch.cuda.is_available(): raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" ) else: _lowerCAmelCase ="""cpu""" _lowerCAmelCase =Path(__UpperCamelCase ) # VAE DECODER _lowerCAmelCase =AutoencoderKL.from_pretrained(model_path + """/vae""" ) _lowerCAmelCase =vae_decoder.config.latent_channels # forward only through the decoder part _lowerCAmelCase =vae_decoder.decode onnx_export( __UpperCamelCase , model_args=( torch.randn(1 , __UpperCamelCase , 25 , 25 ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ), False, ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={ """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""}, } , opset=__UpperCamelCase , ) del vae_decoder if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--model_path', type=str, required=True, help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).', ) parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.') parser.add_argument( '--opset', default=14, type=int, help='The version of the ONNX operator set to use.', ) parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode') __A = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print('SD: Done: ONNX')
341
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =decoder_seq_length # For common tests _lowerCAmelCase =self.decoder_seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_attention_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =d_model _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =eos_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =decoder_start_token_id _lowerCAmelCase =use_cache _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =None _lowerCAmelCase =decoder_seq_length _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_attention_mask: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: _lowerCAmelCase =True _lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _lowerCAmelCase =outputs["""past_key_values"""] # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # 
append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase = True lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Any: pass def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def _lowerCAmelCase ( self ) -> str: pass
341
1
"""simple docstring""" import math import sys def _lowerCamelCase(__UpperCamelCase ) -> str: _lowerCAmelCase ="""""" try: with open(__UpperCamelCase , """rb""" ) as binary_file: _lowerCAmelCase =binary_file.read() for dat in data: _lowerCAmelCase =F'''{dat:08b}''' result += curr_byte return result except OSError: print("""File not accessible""" ) sys.exit() def _lowerCamelCase(__UpperCamelCase ) -> str: _lowerCAmelCase ={"""0""": """0""", """1""": """1"""} _lowerCAmelCase , _lowerCAmelCase ="""""", """""" _lowerCAmelCase =len(__UpperCamelCase ) for i in range(len(__UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue _lowerCAmelCase =lexicon[curr_string] result += last_match_id _lowerCAmelCase =last_match_id + """0""" if math.loga(__UpperCamelCase ).is_integer(): _lowerCAmelCase ={} for curr_key in list(__UpperCamelCase ): _lowerCAmelCase =lexicon.pop(__UpperCamelCase ) _lowerCAmelCase =new_lex _lowerCAmelCase =last_match_id + """1""" index += 1 _lowerCAmelCase ="""""" return result def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> None: _lowerCAmelCase =8 try: with open(__UpperCamelCase , """wb""" ) as opened_file: _lowerCAmelCase =[ to_write[i : i + byte_length] for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("""10000000""" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(__UpperCamelCase , 2 ).to_bytes(1 , byteorder="""big""" ) ) except OSError: print("""File not accessible""" ) sys.exit() def _lowerCamelCase(__UpperCamelCase ) -> str: _lowerCAmelCase =0 for letter in data_bits: if letter == "1": break counter += 1 _lowerCAmelCase =data_bits[counter:] _lowerCAmelCase =data_bits[counter + 1 :] return data_bits def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> None: _lowerCAmelCase =read_file_binary(__UpperCamelCase ) _lowerCAmelCase =remove_prefix(__UpperCamelCase ) _lowerCAmelCase =decompress_data(__UpperCamelCase ) write_file_binary(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
341
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = JukeboxTokenizer lowerCamelCase = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCAmelCase ( self ) -> str: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCAmelCase ( self ) -> Any: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 
35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
341
1
"""simple docstring""" __A = [ (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'), (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'), (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'), ] def _lowerCamelCase(__UpperCamelCase ) -> int: _lowerCAmelCase ={"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000} _lowerCAmelCase =0 _lowerCAmelCase =0 while place < len(__UpperCamelCase ): if (place + 1 < len(__UpperCamelCase )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def _lowerCamelCase(__UpperCamelCase ) -> str: _lowerCAmelCase =[] for arabic, roman in ROMAN: ((_lowerCAmelCase) , (_lowerCAmelCase)) =divmod(__UpperCamelCase , __UpperCamelCase ) result.append(roman * factor ) if number == 0: break return "".join(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
341
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __A = {'vinai/bartpho-syllable': 1024} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =monolingual_vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCAmelCase ={} _lowerCAmelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =cnt cnt += 1 with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _lowerCAmelCase =line.strip().split()[0] _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None _lowerCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] _lowerCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: 
if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: return self.fairseq_ids_to_tokens[index] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
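By contrast with the XLNet layout sketched earlier, the BARTpho methods above wrap sequences RoBERTa-style, with `<s>` in front, `</s>` at the end, and a doubled `</s></s>` between a pair. A small self-contained illustration follows; the ids 0 and 2 are placeholders, not guaranteed to be the real vocabulary ids.

# Placeholder ids (0 standing in for <s>, 2 for </s>).
CLS_ID, SEP_ID = 0, 2


def build_inputs(ids_a, ids_b=None):
    cls, sep = [CLS_ID], [SEP_ID]
    if ids_b is None:
        return cls + ids_a + sep
    return cls + ids_a + sep + sep + ids_b + sep


def special_tokens_mask(ids_a, ids_b=None):
    if ids_b is None:
        return [1] + [0] * len(ids_a) + [1]
    return [1] + [0] * len(ids_a) + [1, 1] + [0] * len(ids_b) + [1]


print(build_inputs([10, 11], [20]))         # [0, 10, 11, 2, 2, 20, 2]
print(special_tokens_mask([10, 11], [20]))  # [1, 0, 0, 1, 1, 0, 1]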
341
1
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def _lowerCamelCase(__UpperCamelCase ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _lowerCamelCase() -> Iterator[int]: _lowerCAmelCase =2 while True: if is_prime(__UpperCamelCase ): yield num num += 1 def _lowerCamelCase(__UpperCamelCase = 2000000 ) -> int: return sum(takewhile(lambda __UpperCamelCase : x < n , prime_generator() ) ) if __name__ == "__main__": print(F"""{solution() = }""")
341
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =1 _lowerCAmelCase =3 _lowerCAmelCase =(32, 32) _lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) return CLIPTextModel(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , 
guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _lowerCAmelCase =unet.half() _lowerCAmelCase =text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase 
=sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowerCAmelCase ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , ) _lowerCAmelCase 
=torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
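# A minimal standalone sketch (not part of the test class above) of the memory-
# constrained inference path the last test exercises: fp16 weights, attention
# slicing, and sequential CPU offload to bound peak VRAM. The model id, image URL
# and prompt are taken from the tests; the 20-step count is an illustrative choice.
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

def run_low_memory_upscale():
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing(1)       # smallest attention slices -> lowest memory
    pipe.enable_sequential_cpu_offload()   # stream submodules onto the GPU on demand
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/sd2-upscale/low_res_cat.png"
    )
    generator = torch.manual_seed(0)
    return pipe(
        prompt="a cat sitting on a park bench",
        image=low_res,
        generator=generator,
        num_inference_steps=20,
        output_type="pil",
    ).images[0]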
"""simple docstring""" import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = 0 lowerCamelCase = False lowerCamelCase = 3.0 class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Optional[Any]: # If no defaults are changed, `to_kwargs` returns an empty dict. self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} ) self.assertDictEqual(MockClass(a=2 , b=__UpperCAmelCase ).to_kwargs() , {"""a""": 2, """b""": True} ) self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {"""a""": 2, """c""": 2.2_5} ) @require_cuda def _lowerCAmelCase ( self ) -> int: # If no defaults are changed, `to_kwargs` returns an empty dict. _lowerCAmelCase =GradScalerKwargs(init_scale=10_24 , growth_factor=2 ) AcceleratorState._reset_state() _lowerCAmelCase =Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _lowerCAmelCase =accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_0_2_4.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 20_00 ) self.assertEqual(scaler._enabled , __UpperCAmelCase ) @require_multi_gpu def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": __A = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) __A = Accelerator(kwargs_handlers=[ddp_scaler]) __A = torch.nn.Linear(100, 200) __A = accelerator.prepare(model) # Check the values changed in kwargs __A = '' __A = model.bucket_bytes_cap // (1024 * 1024) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''cvt''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =num_channels _lowerCAmelCase =patch_sizes _lowerCAmelCase =patch_stride _lowerCAmelCase =patch_padding _lowerCAmelCase =embed_dim _lowerCAmelCase =num_heads _lowerCAmelCase =depth _lowerCAmelCase =mlp_ratio _lowerCAmelCase =attention_drop_rate _lowerCAmelCase =drop_rate _lowerCAmelCase =drop_path_rate _lowerCAmelCase =qkv_bias _lowerCAmelCase =cls_token _lowerCAmelCase =qkv_projection_method _lowerCAmelCase =kernel_qkv _lowerCAmelCase =padding_kv _lowerCAmelCase =stride_kv _lowerCAmelCase =padding_q _lowerCAmelCase =stride_q _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps
"""simple docstring""" import argparse from collections import defaultdict def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: _lowerCAmelCase =F'''{file}_{class_name}_{test_name}''' done_test[_id] += 1 with open(__UpperCamelCase , """r""" ) as f: _lowerCAmelCase =f.readlines() _lowerCAmelCase =F'''class {class_name}(''' _lowerCAmelCase =F'''{4 * ' '}def {test_name}(''' _lowerCAmelCase =F'''{8 * ' '}{correct_line.split()[0]}''' _lowerCAmelCase =F'''{16 * ' '}{correct_line.split()[0]}''' _lowerCAmelCase =False _lowerCAmelCase =False _lowerCAmelCase =False _lowerCAmelCase =False _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =[] for line in lines: if line.startswith(__UpperCamelCase ): _lowerCAmelCase =True elif in_class and line.startswith(__UpperCamelCase ): _lowerCAmelCase =True elif in_class and in_func and (line.startswith(__UpperCamelCase ) or line.startswith(__UpperCamelCase )): _lowerCAmelCase =len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _lowerCAmelCase =True if in_class and in_func and in_line: if ")" not in line: continue else: _lowerCAmelCase =True if in_class and in_func and in_line and insert_line: new_lines.append(F'''{spaces * ' '}{correct_line}''' ) _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =False else: new_lines.append(__UpperCamelCase ) with open(__UpperCamelCase , """w""" ) as f: for line in new_lines: f.write(__UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=None ) -> Dict: if fail is not None: with open(__UpperCamelCase , """r""" ) as f: _lowerCAmelCase ={l.strip() for l in f.readlines()} else: _lowerCAmelCase =None with open(__UpperCamelCase , """r""" ) as f: _lowerCAmelCase =f.readlines() _lowerCAmelCase =defaultdict(__UpperCamelCase ) for line in correct_lines: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('--correct_filename', help='filename of tests with expected result') parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None) __A = parser.parse_args() main(args.correct_filename, args.fail_filename)
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''image_processor''', '''tokenizer'''] lowerCamelCase = '''CLIPImageProcessor''' lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) _lowerCAmelCase =kwargs.pop("""feature_extractor""" ) _lowerCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _lowerCAmelCase =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer.model_input_names _lowerCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCAmelCase , """width_multiplier""" ) ) class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase="swish" , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=None , __UpperCAmelCase=0.2_5 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , ) -> Optional[Any]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =image_size _lowerCAmelCase =patch_size _lowerCAmelCase =num_channels _lowerCAmelCase =make_divisible(5_12 * width_multiplier , divisor=8 ) _lowerCAmelCase =hidden_act _lowerCAmelCase =conv_kernel_size _lowerCAmelCase =output_stride _lowerCAmelCase =classifier_dropout_prob _lowerCAmelCase =use_labels _lowerCAmelCase =is_training _lowerCAmelCase =num_labels _lowerCAmelCase =initializer_range _lowerCAmelCase =scope _lowerCAmelCase =width_multiplier _lowerCAmelCase =ffn_dropout _lowerCAmelCase =attn_dropout def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase =None _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase =self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCAmelCase ( self ) -> str: return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =MobileViTVaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def 
_lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =self.num_labels _lowerCAmelCase =MobileViTVaForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase =self.num_labels _lowerCAmelCase =MobileViTVaForSemanticSegmentation(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) lowerCamelCase = ( { '''feature-extraction''': MobileViTVaModel, '''image-classification''': MobileViTVaForImageClassification, '''image-segmentation''': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =MobileViTVaModelTester(self ) _lowerCAmelCase =MobileViTVaConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" ) def _lowerCAmelCase ( self ) -> str: pass @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" ) def _lowerCAmelCase ( self ) -> Optional[Any]: pass @unittest.skip(reason="""MobileViTV2 does not output attentions""" ) def _lowerCAmelCase ( self ) -> List[Any]: pass @require_torch_multi_gpu @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" ) def _lowerCAmelCase ( self ) -> List[Any]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _lowerCAmelCase ( self ) -> str: pass def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase =[*signature.parameters.keys()] _lowerCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> str: def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) _lowerCAmelCase =outputs.hidden_states _lowerCAmelCase =5 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. _lowerCAmelCase =2 for i in range(len(__UpperCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase =True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =MobileViTVaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def _lowerCamelCase() -> Optional[Any]: _lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ) -> Tuple: return ( MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ) if is_vision_available() else None ) @slow def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to( __UpperCAmelCase ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) # verify the logits _lowerCAmelCase =torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) _lowerCAmelCase =torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) ) @slow def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) _lowerCAmelCase =model.to(__UpperCAmelCase ) _lowerCAmelCase =MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) _lowerCAmelCase =prepare_img() _lowerCAmelCase 
=image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) _lowerCAmelCase =outputs.logits # verify the logits _lowerCAmelCase =torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __UpperCAmelCase ) _lowerCAmelCase =torch.tensor( [ [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]], [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]], [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]], ] , device=__UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1e-4 ) ) @slow def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) _lowerCAmelCase =model.to(__UpperCAmelCase ) _lowerCAmelCase =MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" ) _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) _lowerCAmelCase =outputs.logits.detach().cpu() _lowerCAmelCase =image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(50, 60)] ) _lowerCAmelCase =torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __UpperCAmelCase ) _lowerCAmelCase =image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase ) _lowerCAmelCase =torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
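# Standalone sketch of the image-classification path covered by the integration tests
# above; the checkpoint id and the COCO sample image are the ones used in those tests.
import torch
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])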
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['PerceiverFeatureExtractor'] __A = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =VideoMAEConfig() set_architecture_configs(__UpperCamelCase , __UpperCamelCase ) if "finetuned" not in model_name: _lowerCAmelCase =False if "finetuned" in model_name: _lowerCAmelCase ="""huggingface/label-files""" if "kinetics" in model_name: _lowerCAmelCase =400 _lowerCAmelCase ="""kinetics400-id2label.json""" elif "ssv2" in model_name: _lowerCAmelCase =174 _lowerCAmelCase ="""something-something-v2-id2label.json""" else: raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" ) _lowerCAmelCase =json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase ={int(__UpperCamelCase ): v for k, v in idalabel.items()} _lowerCAmelCase =idalabel _lowerCAmelCase ={v: k for k, v in idalabel.items()} return config def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> List[Any]: if "small" in model_name: _lowerCAmelCase =384 _lowerCAmelCase =1536 _lowerCAmelCase =12 _lowerCAmelCase =16 _lowerCAmelCase =12 _lowerCAmelCase =3 _lowerCAmelCase =192 _lowerCAmelCase =768 elif "large" in model_name: _lowerCAmelCase =1024 _lowerCAmelCase =4096 _lowerCAmelCase =24 _lowerCAmelCase =16 _lowerCAmelCase =12 _lowerCAmelCase =8 _lowerCAmelCase =512 _lowerCAmelCase =2048 elif "huge" in model_name: _lowerCAmelCase =1280 _lowerCAmelCase =5120 _lowerCAmelCase =32 _lowerCAmelCase =16 _lowerCAmelCase =12 _lowerCAmelCase =8 _lowerCAmelCase =640 _lowerCAmelCase =2560 elif "base" not in model_name: raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" ) def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]: if "encoder." 
in name: _lowerCAmelCase =name.replace("""encoder.""" , """""" ) if "cls_token" in name: _lowerCAmelCase =name.replace("""cls_token""" , """videomae.embeddings.cls_token""" ) if "decoder_pos_embed" in name: _lowerCAmelCase =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: _lowerCAmelCase =name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: _lowerCAmelCase =name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: _lowerCAmelCase =name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" ) if "decoder.blocks" in name: _lowerCAmelCase =name.replace("""decoder.blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: _lowerCAmelCase =name.replace("""blocks""" , """videomae.encoder.layer""" ) if "attn.proj" in name: _lowerCAmelCase =name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "bias" not in name: _lowerCAmelCase =name.replace("""attn""" , """attention.self""" ) if "attn" in name: _lowerCAmelCase =name.replace("""attn""" , """attention.attention""" ) if "norm1" in name: _lowerCAmelCase =name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: _lowerCAmelCase =name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: _lowerCAmelCase =name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: _lowerCAmelCase =name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: _lowerCAmelCase =name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: _lowerCAmelCase =name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: _lowerCAmelCase =name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: _lowerCAmelCase =name.replace("""norm.weight""" , """videomae.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: _lowerCAmelCase =name.replace("""norm.bias""" , """videomae.layernorm.bias""" ) if "head" in name and "decoder" not in name: _lowerCAmelCase =name.replace("""head""" , """classifier""" ) return name def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> str: for key in orig_state_dict.copy().keys(): _lowerCAmelCase =orig_state_dict.pop(__UpperCamelCase ) if key.startswith("""encoder.""" ): _lowerCAmelCase =key.replace("""encoder.""" , """""" ) if "qkv" in key: _lowerCAmelCase =key.split(""".""" ) if key.startswith("""decoder.blocks""" ): _lowerCAmelCase =config.decoder_hidden_size _lowerCAmelCase =int(key_split[2] ) _lowerCAmelCase ="""decoder.decoder_layers.""" if "weight" in key: _lowerCAmelCase =val[:dim, :] _lowerCAmelCase =val[dim : dim * 2, :] _lowerCAmelCase =val[-dim:, :] else: _lowerCAmelCase =config.hidden_size _lowerCAmelCase =int(key_split[1] ) _lowerCAmelCase ="""videomae.encoder.layer.""" if "weight" in key: _lowerCAmelCase =val[:dim, :] _lowerCAmelCase =val[dim : dim * 2, :] _lowerCAmelCase =val[-dim:, :] else: _lowerCAmelCase =val return orig_state_dict def _lowerCamelCase() -> Tuple: _lowerCAmelCase =hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" ) _lowerCAmelCase =np.load(__UpperCamelCase ) return list(__UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase ) -> Any: _lowerCAmelCase =get_videomae_config(__UpperCamelCase ) if "finetuned" in model_name: _lowerCAmelCase =VideoMAEForVideoClassification(__UpperCamelCase ) else: _lowerCAmelCase =VideoMAEForPreTraining(__UpperCamelCase ) # download original checkpoint, hosted on Google Drive _lowerCAmelCase ="""pytorch_model.bin""" gdown.cached_download(__UpperCamelCase , __UpperCamelCase , quiet=__UpperCamelCase ) _lowerCAmelCase =torch.load(__UpperCamelCase , map_location="""cpu""" ) if "model" in files: _lowerCAmelCase =files["""model"""] else: _lowerCAmelCase =files["""module"""] _lowerCAmelCase =convert_state_dict(__UpperCamelCase , __UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) model.eval() # verify model on basic input _lowerCAmelCase =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) _lowerCAmelCase =prepare_video() _lowerCAmelCase =image_processor(__UpperCamelCase , return_tensors="""pt""" ) if "finetuned" not in model_name: _lowerCAmelCase =hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" ) _lowerCAmelCase =torch.load(__UpperCamelCase ) _lowerCAmelCase =model(**__UpperCamelCase ) _lowerCAmelCase =outputs.logits _lowerCAmelCase =[ """videomae-small-finetuned-kinetics""", """videomae-small-finetuned-ssv2""", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) """videomae-base-short""", """videomae-base-short-finetuned-kinetics""", """videomae-base""", """videomae-base-finetuned-kinetics""", """videomae-large""", """videomae-large-finetuned-kinetics""", """videomae-huge-finetuned-kinetics""", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) """videomae-base-short-ssv2""", """videomae-base-short-finetuned-ssv2""", """videomae-base-ssv2""", """videomae-base-finetuned-ssv2""", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": _lowerCAmelCase =torch.Size([1, 400] ) _lowerCAmelCase =torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": _lowerCAmelCase =torch.Size([1, 174] ) _lowerCAmelCase =torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": _lowerCAmelCase =torch.Size([1, 1408, 1536] ) _lowerCAmelCase =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": _lowerCAmelCase =torch.Size([1, 1408, 1536] ) _lowerCAmelCase =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one _lowerCAmelCase =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": _lowerCAmelCase =torch.Size([1, 1408, 1536] ) _lowerCAmelCase =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": _lowerCAmelCase =torch.Size([1, 400] ) _lowerCAmelCase =torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": _lowerCAmelCase =torch.Size([1, 400] ) _lowerCAmelCase =torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": _lowerCAmelCase =torch.Size([1, 400] ) _lowerCAmelCase =torch.tensor([0.65_88, 0.09_90, 
-0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": _lowerCAmelCase =torch.Size([1, 400] ) _lowerCAmelCase =torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": _lowerCAmelCase =torch.Size([1, 1408, 1536] ) _lowerCAmelCase =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": _lowerCAmelCase =torch.Size([1, 174] ) _lowerCAmelCase =torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": _lowerCAmelCase =torch.Size([1, 1408, 1536] ) _lowerCAmelCase =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": _lowerCAmelCase =torch.Size([1, 174] ) _lowerCAmelCase =torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(F'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1E-4 ) else: print("""Logits:""" , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1E-4 ) print("""Logits ok!""" ) # verify loss, if applicable if model_name == "videomae-base-short": _lowerCAmelCase =outputs.loss assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-4 ) print("""Loss ok!""" ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) if push_to_hub: print("""Pushing to the hub...""" ) model.push_to_hub(__UpperCamelCase , organization="""nielsr""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4', type=str, help=( 'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct' ' download link.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default='/Users/nielsrogge/Documents/VideoMAE/Test', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __A = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
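# Example invocation of the VideoMAE conversion script above (the script name and
# local paths are illustrative; the checkpoint URL default points at the authors'
# Google Drive release). The script downloads the original weights with gdown,
# remaps the state-dict keys (fused qkv tensors are split into query/key/value),
# verifies the logits against the hard-coded expected slices, then saves the model
# and image processor:
#
#   python convert_videomae_to_pytorch.py \
#       --checkpoint_url "<google-drive-download-link>" \
#       --pytorch_dump_folder_path ./videomae-base-converted \
#       --model_name videomae-base \
#       --push_to_hub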
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
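# Example invocation of the LDM conversion script above (paths are illustrative).
# The converter renames resnet/attention keys, splits fused qkv tensors into separate
# query/key/value projections, and, when a scheduler config and VQModel live next to
# the checkpoint, assembles and saves a full LDMPipeline instead of only the UNet:
#
#   python convert_ldm_original_checkpoint_to_diffusers.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./ldm-converted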
"""simple docstring""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument __A = { '/attention/': '/0/SelfAttention/', '/self_attention/': '/0/SelfAttention/', '/encoder_decoder_attention/': '/1/EncDecAttention/', 'value': 'v', 'query': 'q', 'key': 'k', 'out': 'o', 'pre_self_attention_layer_norm': '0/layer_norm', 'pre_cross_attention_layer_norm': '1/layer_norm', 'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong 'token_embedder': 'shared', 'encoder_norm': 'final_layer_norm', 'decoder_norm': 'final_layer_norm', 'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight', 'router/router_weights/w/': 'router/classifier/', 'roer/roer_weights/w/': 'router/classifier/', 'logits_dense': 'lm_head', } def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model _lowerCAmelCase =list(s_dict.keys() ) for key in keys: _lowerCAmelCase =R""".*/layers_(\d+)""" _lowerCAmelCase =key if re.match(__UpperCamelCase , __UpperCamelCase ): _lowerCAmelCase =re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , __UpperCamelCase ) _lowerCAmelCase =R"""(encoder|decoder)\/""" if re.match(__UpperCamelCase , __UpperCamelCase ): _lowerCAmelCase =re.match(__UpperCamelCase , __UpperCamelCase ).groups() if groups[0] == "encoder": _lowerCAmelCase =re.sub(R"""/mlp/""" , R"""/1/mlp/""" , __UpperCamelCase ) _lowerCAmelCase =re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , __UpperCamelCase ) elif groups[0] == "decoder": _lowerCAmelCase =re.sub(R"""/mlp/""" , R"""/2/mlp/""" , __UpperCamelCase ) _lowerCAmelCase =re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , __UpperCamelCase ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: _lowerCAmelCase =new_key.replace(__UpperCamelCase , __UpperCamelCase ) print(F'''{key} -> {new_key}''' ) _lowerCAmelCase =s_dict.pop(__UpperCamelCase ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: _lowerCAmelCase =s_dict[ """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: _lowerCAmelCase =s_dict[ """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: _lowerCAmelCase =s_dict[key].shape[0] _lowerCAmelCase =s_dict[key] for idx in range(__UpperCamelCase ): _lowerCAmelCase =expert_weihts[idx] print(F'''{key} -> {key.replace('expert/' , 'nested fstring' )}''' ) s_dict.pop(__UpperCamelCase ) return s_dict __A = { 'NUM_ENCODER_LAYERS': 'num_layers', 'NUM_DECODER_LAYERS': 'num_decoder_layers', 'NUM_HEADS': 'num_heads', 'HEAD_DIM': 'd_kv', 'EMBED_DIM': 'd_model', 'MLP_DIM': 'd_ff', 'NUM_SELECTED_EXPERTS': 'num_selected_experts', 'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers', 'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers', 'dense.MlpBlock.activations': 'feed_forward_proj', } def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Tuple: # Convert a google style config to the hugging face fromat import regex as re with open(__UpperCamelCase , """r""" ) as f: _lowerCAmelCase =f.read() _lowerCAmelCase =re.findall(R"""(.*) = ([0-9.]*)""" , __UpperCamelCase ) _lowerCAmelCase ={} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": _lowerCAmelCase =float(__UpperCamelCase ) if """.""" in value else int(__UpperCamelCase ) _lowerCAmelCase =re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , __UpperCamelCase )[0] _lowerCAmelCase =str(activation[1] ) _lowerCAmelCase =num_experts _lowerCAmelCase =SwitchTransformersConfig(**__UpperCamelCase ) return config def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="./" , __UpperCamelCase=8 ) -> Tuple: # Initialise PyTorch model print(F'''Loading flax weights from : {flax_checkpoint_path}''' ) _lowerCAmelCase =checkpoints.load_tax_checkpoint(__UpperCamelCase ) if gin_file is not None: _lowerCAmelCase =convert_gin_to_config(__UpperCamelCase , __UpperCamelCase ) else: _lowerCAmelCase =SwitchTransformersConfig.from_pretrained(__UpperCamelCase ) _lowerCAmelCase =SwitchTransformersForConditionalGeneration(__UpperCamelCase ) _lowerCAmelCase =flax_params["""target"""] _lowerCAmelCase =flatten_dict(__UpperCamelCase , sep="""/""" ) _lowerCAmelCase =rename_keys(__UpperCamelCase ) _lowerCAmelCase =unflatten_dict(__UpperCamelCase , sep="""/""" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(__UpperCamelCase , __UpperCamelCase ) print(F'''Save PyTorch model to {pytorch_dump_path}''' ) pt_model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the' ' model architecture. If not provided, a `gin_file` has to be provided.' ), ) parser.add_argument( '--gin_file', default=None, type=str, required=False, help='Path to the gin config file. If not provided, a `config_file` has to be passed ', ) parser.add_argument( '--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.' ) parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts') __A = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> list: _lowerCAmelCase =len(__UpperCamelCase ) _lowerCAmelCase =[] for i in range(len(__UpperCamelCase ) - pat_len + 1 ): _lowerCAmelCase =True for j in range(__UpperCamelCase ): if s[i + j] != pattern[j]: _lowerCAmelCase =False break if match_found: position.append(__UpperCamelCase ) return position if __name__ == "__main__": assert naive_pattern_search('ABCDEFG', 'DE') == [3] print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True lowerCamelCase = None lowerCamelCase = 1 lowerCamelCase = None lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
"""simple docstring""" import os import unittest from transformers import LxmertTokenizer, LxmertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = LxmertTokenizer lowerCamelCase = LxmertTokenizerFast lowerCamelCase = True lowerCamelCase = True def _lowerCAmelCase ( self ) -> Optional[int]: super().setUp() _lowerCAmelCase =[ """[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: _lowerCAmelCase ="""UNwant\u00E9d,running""" _lowerCAmelCase ="""unwanted, running""" return input_text, output_text def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =self.tokenizer_class(self.vocab_file ) _lowerCAmelCase =tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(__UpperCAmelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] ) def _lowerCAmelCase ( self ) -> Dict: if not self.test_rust_tokenizer: return _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =self.get_rust_tokenizer() _lowerCAmelCase ="""I was born in 92000, and this is falsé.""" _lowerCAmelCase =tokenizer.tokenize(__UpperCAmelCase ) _lowerCAmelCase =rust_tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =self.get_rust_tokenizer() _lowerCAmelCase =tokenizer.encode(__UpperCAmelCase ) _lowerCAmelCase =rust_tokenizer.encode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowerCamelCase() -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""ylacombe/bark-small""" _lowerCAmelCase =tempfile.mkdtemp() _lowerCAmelCase ="""en_speaker_1""" _lowerCAmelCase ="""This is a test string""" _lowerCAmelCase ="""speaker_embeddings_path.json""" _lowerCAmelCase ="""speaker_embeddings""" def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Tuple: return AutoTokenizer.from_pretrained(self.checkpoint , **__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =BarkProcessor(tokenizer=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase =BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _lowerCAmelCase =self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _lowerCAmelCase =BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _lowerCAmelCase =35 _lowerCAmelCase =2 _lowerCAmelCase =8 _lowerCAmelCase ={ """semantic_prompt""": np.ones(__UpperCAmelCase ), """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ), """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _lowerCAmelCase =processor(text=self.input_string , voice_preset=__UpperCAmelCase ) _lowerCAmelCase =inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file _lowerCAmelCase =os.path.join(self.tmpdirname , """file.npz""" ) np.savez(__UpperCAmelCase , **__UpperCAmelCase ) _lowerCAmelCase =processor(text=self.input_string , voice_preset=__UpperCAmelCase ) _lowerCAmelCase =inputs["""history_prompt"""] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__UpperCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub _lowerCAmelCase =processor(text=self.input_string , voice_preset=self.voice_preset ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =BarkProcessor(tokenizer=__UpperCAmelCase ) _lowerCAmelCase =processor(text=self.input_string ) _lowerCAmelCase =tokenizer( self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , 
return_token_type_ids=__UpperCAmelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
"""simple docstring""" from __future__ import annotations from dataclasses import dataclass @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = 42 lowerCamelCase = None lowerCamelCase = None def _lowerCamelCase(__UpperCamelCase ) -> bool: # Validation def is_valid_tree(__UpperCamelCase ) -> bool: if node is None: return True if not isinstance(__UpperCamelCase , __UpperCamelCase ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(__UpperCamelCase ): raise ValueError( """Each node should be type of TreeNode and data should be float.""" ) def is_binary_search_tree_recursive_check( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , __UpperCamelCase , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , __UpperCamelCase ) ) return is_binary_search_tree_recursive_check(__UpperCamelCase , -float("""inf""" ) , float("""inf""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import requests from bsa import BeautifulSoup def _lowerCamelCase(__UpperCamelCase = "https://www.worldometers.info/coronavirus" ) -> dict: _lowerCAmelCase =BeautifulSoup(requests.get(__UpperCamelCase ).text , """html.parser""" ) _lowerCAmelCase =soup.findAll("""h1""" ) _lowerCAmelCase =soup.findAll("""div""" , {"""class""": """maincounter-number"""} ) keys += soup.findAll("""span""" , {"""class""": """panel-title"""} ) values += soup.findAll("""div""" , {"""class""": """number-table-main"""} ) return {key.text.strip(): value.text.strip() for key, value in zip(__UpperCamelCase , __UpperCamelCase )} if __name__ == "__main__": print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n') for key, value in world_covidaa_stats().items(): print(F"""{key}\n{value}\n""")
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = DownBlockaD # noqa F405 lowerCamelCase = '''down''' def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =[-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = ResnetDownsampleBlockaD # noqa F405 lowerCamelCase = '''down''' def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =[0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = AttnDownBlockaD # noqa F405 lowerCamelCase = '''down''' def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =[0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = CrossAttnDownBlockaD # noqa F405 lowerCamelCase = '''down''' def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common() _lowerCAmelCase =32 return init_dict, inputs_dict def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =[0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = SimpleCrossAttnDownBlockaD # noqa F405 lowerCamelCase = '''down''' @property def _lowerCAmelCase ( self ) -> List[Any]: return super().get_dummy_input(include_encoder_hidden_states=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common() _lowerCAmelCase =32 return init_dict, inputs_dict @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =[0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = SkipDownBlockaD # noqa F405 lowerCamelCase = '''down''' @property def _lowerCAmelCase ( self ) -> Any: return super().get_dummy_input(include_skip_sample=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =[-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = AttnSkipDownBlockaD # noqa F405 lowerCamelCase = '''down''' @property def _lowerCAmelCase ( self ) -> Optional[Any]: return super().get_dummy_input(include_skip_sample=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =[0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2] super().test_output(__UpperCAmelCase ) 
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = DownEncoderBlockaD # noqa F405 lowerCamelCase = '''down''' @property def _lowerCAmelCase ( self ) -> str: return super().get_dummy_input(include_temb=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase ={ """in_channels""": 32, """out_channels""": 32, } _lowerCAmelCase =self.dummy_input return init_dict, inputs_dict def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =[1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = AttnDownEncoderBlockaD # noqa F405 lowerCamelCase = '''down''' @property def _lowerCAmelCase ( self ) -> Optional[int]: return super().get_dummy_input(include_temb=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ={ """in_channels""": 32, """out_channels""": 32, } _lowerCAmelCase =self.dummy_input return init_dict, inputs_dict def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =[0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = UNetMidBlockaD # noqa F405 lowerCamelCase = '''mid''' def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={ """in_channels""": 32, """temb_channels""": 1_28, } _lowerCAmelCase =self.dummy_input return init_dict, inputs_dict def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =[-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = UNetMidBlockaDCrossAttn # noqa F405 lowerCamelCase = '''mid''' def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common() _lowerCAmelCase =32 return init_dict, inputs_dict def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =[0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = UNetMidBlockaDSimpleCrossAttn # noqa F405 lowerCamelCase = '''mid''' @property def _lowerCAmelCase ( self ) -> str: return super().get_dummy_input(include_encoder_hidden_states=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common() _lowerCAmelCase =32 return init_dict, inputs_dict def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =[0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = UpBlockaD # noqa F405 lowerCamelCase = '''up''' @property def _lowerCAmelCase ( self ) -> Any: return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =[-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3] super().test_output(__UpperCAmelCase ) 
class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = ResnetUpsampleBlockaD # noqa F405 lowerCamelCase = '''up''' @property def _lowerCAmelCase ( self ) -> Tuple: return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =[0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = CrossAttnUpBlockaD # noqa F405 lowerCamelCase = '''up''' @property def _lowerCAmelCase ( self ) -> Dict: return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common() _lowerCAmelCase =32 return init_dict, inputs_dict def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =[-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = SimpleCrossAttnUpBlockaD # noqa F405 lowerCamelCase = '''up''' @property def _lowerCAmelCase ( self ) -> Optional[Any]: return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase , include_encoder_hidden_states=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase , _lowerCAmelCase =super().prepare_init_args_and_inputs_for_common() _lowerCAmelCase =32 return init_dict, inputs_dict def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =[0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = AttnUpBlockaD # noqa F405 lowerCamelCase = '''up''' @property def _lowerCAmelCase ( self ) -> List[Any]: return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =[0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = SkipUpBlockaD # noqa F405 lowerCamelCase = '''up''' @property def _lowerCAmelCase ( self ) -> Dict: return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =[-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = AttnSkipUpBlockaD # noqa F405 lowerCamelCase = '''up''' @property def _lowerCAmelCase ( self ) -> Tuple: return super().get_dummy_input(include_res_hidden_states_tuple=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =[0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = 
UpDecoderBlockaD # noqa F405 lowerCamelCase = '''up''' @property def _lowerCAmelCase ( self ) -> Any: return super().get_dummy_input(include_temb=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase ={"""in_channels""": 32, """out_channels""": 32} _lowerCAmelCase =self.dummy_input return init_dict, inputs_dict def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =[0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7] super().test_output(__UpperCAmelCase ) class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = AttnUpDecoderBlockaD # noqa F405 lowerCamelCase = '''up''' @property def _lowerCAmelCase ( self ) -> Tuple: return super().get_dummy_input(include_temb=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase ={"""in_channels""": 32, """out_channels""": 32} _lowerCAmelCase =self.dummy_input return init_dict, inputs_dict def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =[0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8] super().test_output(__UpperCAmelCase )
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_00 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ) -> Optional[int]: _lowerCAmelCase =parent _lowerCAmelCase =1_00 _lowerCAmelCase =batch_size _lowerCAmelCase =image_size _lowerCAmelCase =patch_size _lowerCAmelCase =num_channels _lowerCAmelCase =is_training _lowerCAmelCase =use_labels _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_act _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =type_sequence_label_size _lowerCAmelCase =initializer_range _lowerCAmelCase =scope _lowerCAmelCase =out_indices _lowerCAmelCase =num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _lowerCAmelCase =(image_size // patch_size) ** 2 _lowerCAmelCase =num_patches + 1 def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase =None _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _lowerCAmelCase =self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCAmelCase ( self ) -> Union[str, Any]: return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase 
=BeitModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: _lowerCAmelCase =BeitForMaskedImageModeling(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =self.type_sequence_label_size _lowerCAmelCase =BeitForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCAmelCase =1 _lowerCAmelCase =BeitForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: _lowerCAmelCase =self.num_labels _lowerCAmelCase =BeitForSemanticSegmentation(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) _lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowerCamelCase = ( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =BeitModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason="""BEiT does not use inputs_embeds""" ) def _lowerCAmelCase ( self ) -> Optional[Any]: pass @require_torch_multi_gpu @unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase , _lowerCAmelCase 
=self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase =model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase =[*signature.parameters.keys()] _lowerCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Dict: if not self.model_tester.is_training: return _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase =True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]: continue _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() _lowerCAmelCase =self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) _lowerCAmelCase =model(**__UpperCAmelCase ).loss loss.backward() def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _lowerCAmelCase =False _lowerCAmelCase =True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue _lowerCAmelCase =model_class(__UpperCAmelCase ) model.gradient_checkpointing_enable() model.to(__UpperCAmelCase ) model.train() _lowerCAmelCase =self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) _lowerCAmelCase =model(**__UpperCAmelCase ).loss loss.backward() def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase =_config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: _lowerCAmelCase =model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , 
msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def _lowerCAmelCase ( self ) -> List[str]: for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =BeitModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def _lowerCamelCase() -> Dict: _lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ) -> Any: return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None @slow def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(__UpperCAmelCase ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).pixel_values.to(__UpperCAmelCase ) # prepare bool_masked_pos _lowerCAmelCase =torch.ones((1, 1_96) , dtype=torch.bool ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase =model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase ) _lowerCAmelCase =outputs.logits # verify the logits _lowerCAmelCase =torch.Size((1, 1_96, 81_92) ) self.assertEqual(logits.shape , __UpperCAmelCase ) _lowerCAmelCase =torch.tensor( [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1e-2 ) ) @slow def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(__UpperCAmelCase ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) _lowerCAmelCase =outputs.logits # verify the logits _lowerCAmelCase =torch.Size((1, 10_00) ) self.assertEqual(logits.shape , __UpperCAmelCase ) _lowerCAmelCase =torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) ) _lowerCAmelCase =2_81 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to( __UpperCAmelCase ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) _lowerCAmelCase =outputs.logits # verify the logits _lowerCAmelCase =torch.Size((1, 2_18_41) ) self.assertEqual(logits.shape , __UpperCAmelCase ) _lowerCAmelCase =torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) ) _lowerCAmelCase =23_96 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase 
=BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _lowerCAmelCase =model.to(__UpperCAmelCase ) _lowerCAmelCase =BeitImageProcessor(do_resize=__UpperCAmelCase , size=6_40 , do_center_crop=__UpperCAmelCase ) _lowerCAmelCase =load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" ) _lowerCAmelCase =Image.open(ds[0]["""file"""] ) _lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) _lowerCAmelCase =outputs.logits # verify the logits _lowerCAmelCase =torch.Size((1, 1_50, 1_60, 1_60) ) self.assertEqual(logits.shape , __UpperCAmelCase ) _lowerCAmelCase =version.parse(PIL.__version__ ) < version.parse("""9.0.0""" ) if is_pillow_less_than_a: _lowerCAmelCase =torch.tensor( [ [[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]], [[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]], [[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]], ] , device=__UpperCAmelCase , ) else: _lowerCAmelCase =torch.tensor( [ [[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]], [[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]], [[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]], ] , device=__UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1e-4 ) ) @slow def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" ) _lowerCAmelCase =model.to(__UpperCAmelCase ) _lowerCAmelCase =BeitImageProcessor(do_resize=__UpperCAmelCase , size=6_40 , do_center_crop=__UpperCAmelCase ) _lowerCAmelCase =load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" ) _lowerCAmelCase =Image.open(ds[0]["""file"""] ) _lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) _lowerCAmelCase =outputs.logits.detach().cpu() _lowerCAmelCase =image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(5_00, 3_00)] ) _lowerCAmelCase =torch.Size((5_00, 3_00) ) self.assertEqual(segmentation[0].shape , __UpperCAmelCase ) _lowerCAmelCase =image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase ) _lowerCAmelCase =torch.Size((1_60, 1_60) ) self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
341
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
341
1
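# Illustrative sketch: the BeiT model tester in the row above derives its sequence length from
# the image and patch sizes (one token per patch, plus the [CLS] token). The concrete numbers
# below simply mirror the tester's defaults and are only an example.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2   # 15 x 15 = 225 patches
seq_length = num_patches + 1                    # + 1 for the [CLS] token -> 226
print(num_patches, seq_length)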
"""simple docstring""" from __future__ import annotations import queue class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> List[str]: _lowerCAmelCase =data _lowerCAmelCase =None _lowerCAmelCase =None def _lowerCamelCase() -> TreeNode: print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCAmelCase =input("""Enter the value of the root node: """ ).strip().lower() _lowerCAmelCase =queue.Queue() _lowerCAmelCase =TreeNode(int(__UpperCamelCase ) ) q.put(__UpperCamelCase ) while not q.empty(): _lowerCAmelCase =q.get() _lowerCAmelCase =F'''Enter the left node of {node_found.data}: ''' _lowerCAmelCase =input(__UpperCamelCase ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase =TreeNode(int(__UpperCamelCase ) ) _lowerCAmelCase =left_node q.put(__UpperCamelCase ) _lowerCAmelCase =F'''Enter the right node of {node_found.data}: ''' _lowerCAmelCase =input(__UpperCamelCase ).strip().lower() or """n""" if check == "n": return tree_node _lowerCAmelCase =TreeNode(int(__UpperCamelCase ) ) _lowerCAmelCase =right_node q.put(__UpperCamelCase ) raise def _lowerCamelCase(__UpperCamelCase ) -> None: if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def _lowerCamelCase(__UpperCamelCase ) -> None: if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def _lowerCamelCase(__UpperCamelCase ) -> None: if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def _lowerCamelCase(__UpperCamelCase ) -> None: if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node: return _lowerCAmelCase =queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): _lowerCAmelCase =q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _lowerCamelCase(__UpperCamelCase ) -> None: if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node: return _lowerCAmelCase =queue.Queue() q.put(__UpperCamelCase ) while not q.empty(): _lowerCAmelCase =[] while not q.empty(): _lowerCAmelCase =q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase ) -> None: if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node: return _lowerCAmelCase =[] _lowerCAmelCase =node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(__UpperCamelCase ) _lowerCAmelCase =n.left # end of while means current node doesn't have left child _lowerCAmelCase =stack.pop() # start to traverse its right child _lowerCAmelCase =n.right def _lowerCamelCase(__UpperCamelCase ) -> None: if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node: return _lowerCAmelCase =[] _lowerCAmelCase =node while n or stack: while n: stack.append(__UpperCamelCase ) _lowerCAmelCase =n.left _lowerCAmelCase =stack.pop() print(n.data , end=""",""" ) _lowerCAmelCase =n.right def _lowerCamelCase(__UpperCamelCase ) -> None: if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not node: return _lowerCAmelCase , _lowerCAmelCase 
=[], [] _lowerCAmelCase =node stacka.append(__UpperCamelCase ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCAmelCase =stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__UpperCamelCase ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def _lowerCamelCase(__UpperCamelCase = "" , __UpperCamelCase=50 , __UpperCamelCase="*" ) -> str: if not s: return "\n" + width * char _lowerCAmelCase , _lowerCAmelCase =divmod(width - len(__UpperCamelCase ) - 2 , 2 ) return F'''{left * char} {s} {(left + extra) * char}''' if __name__ == "__main__": import doctest doctest.testmod() print(prompt('Binary Tree Traversals')) __A = build_tree() print(prompt('Pre Order Traversal')) pre_order(node) print(prompt() + '\n') print(prompt('In Order Traversal')) in_order(node) print(prompt() + '\n') print(prompt('Post Order Traversal')) post_order(node) print(prompt() + '\n') print(prompt('Level Order Traversal')) level_order(node) print(prompt() + '\n') print(prompt('Actual Level Order Traversal')) level_order_actual(node) print('*' * 50 + '\n') print(prompt('Pre Order Traversal - Iteration Version')) pre_order_iter(node) print(prompt() + '\n') print(prompt('In Order Traversal - Iteration Version')) in_order_iter(node) print(prompt() + '\n') print(prompt('Post Order Traversal - Iteration Version')) post_order_iter(node) print(prompt())
341
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model'} __A = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __A = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __A = 0 __A = 1 __A = 2 __A = 3 __A = 4 class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = '''left''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =3 _lowerCAmelCase =do_lower_case _lowerCAmelCase =remove_space _lowerCAmelCase =keep_accents _lowerCAmelCase =vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> str: return len(self.sp_model ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None return state def __setstate__( self , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: if self.remove_space: _lowerCAmelCase =""" """.join(inputs.strip().split() ) else: _lowerCAmelCase =inputs _lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase ) _lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase =outputs.lower() return outputs def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: _lowerCAmelCase 
=self.preprocess_text(__UpperCAmelCase ) _lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) _lowerCAmelCase =[] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase =cur_pieces[1:] else: _lowerCAmelCase =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: return self.sp_model.PieceToId(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.IdToPiece(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str: _lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase ) _lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _lowerCAmelCase =[] _lowerCAmelCase =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) _lowerCAmelCase =[] sub_texts.append(__UpperCAmelCase ) else: current_sub_text.append(__UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _lowerCAmelCase ="""""".join(__UpperCAmelCase ) _lowerCAmelCase =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase ) return clean_text else: return text def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] return ([0] * len(__UpperCAmelCase )) + [1, 1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = 
None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
341
1
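# Illustrative, self-contained version of the iterative in-order traversal from the binary-tree
# snippet in the row above, with ordinary names in place of the row's obfuscated identifiers.
from __future__ import annotations
from dataclasses import dataclass

@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None

def in_order_iter(root: Node | None) -> list[int]:
    out: list[int] = []
    stack: list[Node] = []
    n = root
    while n or stack:
        while n:                 # walk left as far as possible, remembering the path
            stack.append(n)
            n = n.left
        n = stack.pop()          # left subtree done: visit this node
        out.append(n.data)
        n = n.right              # then continue with the right subtree
    return out

assert in_order_iter(Node(2, Node(1), Node(3))) == [1, 2, 3]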
"""simple docstring""" import warnings from .generation import TFGenerationMixin class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' # warning at import time warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
341
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
341
1
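# Illustrative version of the double-base palindrome snippet in the row above: sum every number
# below the limit that is a palindrome both in base 10 and in base 2 (Project Euler style).
def is_palindrome(s: str) -> bool:
    return s == s[::-1]

def double_base_palindrome_sum(limit: int = 1_000_000) -> int:
    # bin(i) looks like '0b101', so drop the '0b' prefix before checking
    return sum(i for i in range(1, limit) if is_palindrome(str(i)) and is_palindrome(bin(i)[2:]))

# Small check: below 10 the double-base palindromes are 1, 3, 5, 7 and 9, which sum to 25.
assert double_base_palindrome_sum(10) == 25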
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''llama''' lowerCamelCase = ['''past_key_values'''] def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =intermediate_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads # for backward compatibility if num_key_value_heads is None: _lowerCAmelCase =num_attention_heads _lowerCAmelCase =num_key_value_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =rms_norm_eps _lowerCAmelCase =pretraining_tp _lowerCAmelCase =use_cache _lowerCAmelCase =rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> str: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
341
1
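# Illustrative stand-alone version of the `rope_scaling` validation performed by the Llama config
# snippet in the row above; the function name is hypothetical, but the rules follow that check:
# a two-field dict with a type in {'linear', 'dynamic'} and a float factor strictly greater than 1.
def validate_rope_scaling(rope_scaling) -> None:
    if rope_scaling is None:
        return
    if not isinstance(rope_scaling, dict) or len(rope_scaling) != 2:
        raise ValueError(f"`rope_scaling` must be a dict with fields `type` and `factor`, got {rope_scaling}")
    scaling_type = rope_scaling.get("type")
    factor = rope_scaling.get("factor")
    if scaling_type not in ("linear", "dynamic"):
        raise ValueError(f"`rope_scaling`'s type field must be 'linear' or 'dynamic', got {scaling_type}")
    if not isinstance(factor, float) or factor <= 1.0:
        raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {factor}")

validate_rope_scaling({"type": "linear", "factor": 2.0})  # passes silently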
"""simple docstring""" import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict: # Initialise PyTorch model _lowerCAmelCase =FunnelConfig.from_json_file(__UpperCamelCase ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCAmelCase =FunnelBaseModel(__UpperCamelCase ) if base_model else FunnelModel(__UpperCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_funnel(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __UpperCamelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.' ) __A = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
341
"""simple docstring""" import warnings from .generation import TFGenerationMixin class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' # warning at import time warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
341
1
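# Illustrative sketch of the import-time deprecation pattern that appears (twice) in the rows
# above: a thin subclass re-exported from the old module path, with a FutureWarning emitted when
# that module is imported. The class names here are hypothetical, not the transformers ones.
import warnings

class NewGenerationMixin:
    """Stand-in for the implementation living at its new import path."""

class OldGenerationMixin(NewGenerationMixin):
    pass

warnings.warn(
    "Importing OldGenerationMixin from its old module is deprecated; "
    "import NewGenerationMixin from the package root instead.",
    FutureWarning,
)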
"""simple docstring""" import argparse import struct import unittest class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> None: _lowerCAmelCase =data # Initialize hash values _lowerCAmelCase =[ 0x6_a_0_9_e_6_6_7, 0xb_b_6_7_a_e_8_5, 0x3_c_6_e_f_3_7_2, 0xa_5_4_f_f_5_3_a, 0x5_1_0_e_5_2_7_f, 0x9_b_0_5_6_8_8_c, 0x1_f_8_3_d_9_a_b, 0x5_b_e_0_c_d_1_9, ] # Initialize round constants _lowerCAmelCase =[ 0x4_2_8_a_2_f_9_8, 0x7_1_3_7_4_4_9_1, 0xb_5_c_0_f_b_c_f, 0xe_9_b_5_d_b_a_5, 0x3_9_5_6_c_2_5_b, 0x5_9_f_1_1_1_f_1, 0x9_2_3_f_8_2_a_4, 0xa_b_1_c_5_e_d_5, 0xd_8_0_7_a_a_9_8, 0x1_2_8_3_5_b_0_1, 0x2_4_3_1_8_5_b_e, 0x5_5_0_c_7_d_c_3, 0x7_2_b_e_5_d_7_4, 0x8_0_d_e_b_1_f_e, 0x9_b_d_c_0_6_a_7, 0xc_1_9_b_f_1_7_4, 0xe_4_9_b_6_9_c_1, 0xe_f_b_e_4_7_8_6, 0x0_f_c_1_9_d_c_6, 0x2_4_0_c_a_1_c_c, 0x2_d_e_9_2_c_6_f, 0x4_a_7_4_8_4_a_a, 0x5_c_b_0_a_9_d_c, 0x7_6_f_9_8_8_d_a, 0x9_8_3_e_5_1_5_2, 0xa_8_3_1_c_6_6_d, 0xb_0_0_3_2_7_c_8, 0xb_f_5_9_7_f_c_7, 0xc_6_e_0_0_b_f_3, 0xd_5_a_7_9_1_4_7, 0x0_6_c_a_6_3_5_1, 0x1_4_2_9_2_9_6_7, 0x2_7_b_7_0_a_8_5, 0x2_e_1_b_2_1_3_8, 0x4_d_2_c_6_d_f_c, 0x5_3_3_8_0_d_1_3, 0x6_5_0_a_7_3_5_4, 0x7_6_6_a_0_a_b_b, 0x8_1_c_2_c_9_2_e, 0x9_2_7_2_2_c_8_5, 0xa_2_b_f_e_8_a_1, 0xa_8_1_a_6_6_4_b, 0xc_2_4_b_8_b_7_0, 0xc_7_6_c_5_1_a_3, 0xd_1_9_2_e_8_1_9, 0xd_6_9_9_0_6_2_4, 0xf_4_0_e_3_5_8_5, 0x1_0_6_a_a_0_7_0, 0x1_9_a_4_c_1_1_6, 0x1_e_3_7_6_c_0_8, 0x2_7_4_8_7_7_4_c, 0x3_4_b_0_b_c_b_5, 0x3_9_1_c_0_c_b_3, 0x4_e_d_8_a_a_4_a, 0x5_b_9_c_c_a_4_f, 0x6_8_2_e_6_f_f_3, 0x7_4_8_f_8_2_e_e, 0x7_8_a_5_6_3_6_f, 0x8_4_c_8_7_8_1_4, 0x8_c_c_7_0_2_0_8, 0x9_0_b_e_f_f_f_a, 0xa_4_5_0_6_c_e_b, 0xb_e_f_9_a_3_f_7, 0xc_6_7_1_7_8_f_2, ] _lowerCAmelCase =self.preprocessing(self.data ) self.final_hash() @staticmethod def _lowerCAmelCase ( __UpperCAmelCase ) -> bytes: _lowerCAmelCase =b"""\x80""" + (b"""\x00""" * (63 - (len(__UpperCAmelCase ) + 8) % 64)) _lowerCAmelCase =struct.pack(""">Q""" , (len(__UpperCAmelCase ) * 8) ) return data + padding + big_endian_integer def _lowerCAmelCase ( self ) -> None: # Convert into blocks of 64 bytes _lowerCAmelCase =[ self.preprocessed_data[x : x + 64] for x in range(0 , len(self.preprocessed_data ) , 64 ) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers _lowerCAmelCase =list(struct.unpack(""">16L""" , __UpperCAmelCase ) ) # add 48 0-ed integers words += [0] * 48 _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self.hashes for index in range(0 , 64 ): if index > 15: # modify the zero-ed indexes at the end of the array _lowerCAmelCase =( self.ror(words[index - 15] , 7 ) ^ self.ror(words[index - 15] , 18 ) ^ (words[index - 15] >> 3) ) _lowerCAmelCase =( self.ror(words[index - 2] , 17 ) ^ self.ror(words[index - 2] , 19 ) ^ (words[index - 2] >> 10) ) _lowerCAmelCase =( words[index - 16] + sa + words[index - 7] + sa ) % 0x1_0_0_0_0_0_0_0_0 # Compression _lowerCAmelCase =self.ror(__UpperCAmelCase , 6 ) ^ self.ror(__UpperCAmelCase , 11 ) ^ self.ror(__UpperCAmelCase , 25 ) _lowerCAmelCase =(e & f) ^ ((~e & 0xf_f_f_f_f_f_f_f) & g) _lowerCAmelCase =( h + sa + ch + self.round_constants[index] + words[index] ) % 0x1_0_0_0_0_0_0_0_0 _lowerCAmelCase =self.ror(__UpperCAmelCase , 2 ) ^ self.ror(__UpperCAmelCase , 13 ) ^ self.ror(__UpperCAmelCase , 22 ) _lowerCAmelCase =(a & b) ^ (a & c) ^ (b & c) _lowerCAmelCase =(sa + maj) % 0x1_0_0_0_0_0_0_0_0 _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase , _lowerCAmelCase =( g, f, e, ((d + tempa) % 0x1_0_0_0_0_0_0_0_0), c, b, a, ((tempa + tempa) % 0x1_0_0_0_0_0_0_0_0), ) _lowerCAmelCase =[a, b, c, d, e, f, g, h] # Modify final values _lowerCAmelCase =[ ((element + mutated_hash_values[index]) % 0x1_0_0_0_0_0_0_0_0) for index, element in enumerate(self.hashes ) ] _lowerCAmelCase ="""""".join([hex(__UpperCAmelCase )[2:].zfill(8 ) for value in self.hashes] ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> int: return 0xf_f_f_f_f_f_f_f & (value << (32 - rotations)) | (value >> rotations) class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> None: import hashlib _lowerCAmelCase =bytes("""Test String""" , """utf-8""" ) self.assertEqual(SHAaaa(__UpperCAmelCase ).hash , hashlib.shaaaa(__UpperCAmelCase ).hexdigest() ) def _lowerCamelCase() -> None: import doctest doctest.testmod() _lowerCAmelCase =argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) _lowerCAmelCase =parser.parse_args() _lowerCAmelCase =args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: _lowerCAmelCase =f.read() else: _lowerCAmelCase =bytes(__UpperCamelCase , """utf-8""" ) print(SHAaaa(__UpperCamelCase ).hash ) if __name__ == "__main__": main()
341
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =decoder_seq_length # For common tests _lowerCAmelCase =self.decoder_seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_attention_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =d_model _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =eos_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =decoder_start_token_id _lowerCAmelCase =use_cache _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =None _lowerCAmelCase =decoder_seq_length _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_attention_mask: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: _lowerCAmelCase =True _lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _lowerCAmelCase =outputs["""past_key_values"""] # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # 
append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase = True lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Any: pass def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def _lowerCAmelCase ( self ) -> str: pass
341
1
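# Illustrative check mirroring the unit test inside the SHA-256 snippet above: a hand-rolled
# SHA-256 digest should match hashlib's reference digest for the same input bytes.
import hashlib

data = b"Test String"
reference = hashlib.sha256(data).hexdigest()
print(reference)  # 64 hex characters; the snippet's SHAaaa(data).hash attribute should equal this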
"""simple docstring""" from math import pow def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> tuple[int, int]: if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count _lowerCAmelCase =int(pow(__UpperCamelCase , __UpperCamelCase ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n _lowerCAmelCase , _lowerCAmelCase =backtrack( __UpperCamelCase , __UpperCamelCase , current_number + 1 , __UpperCamelCase , __UpperCamelCase ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. _lowerCAmelCase , _lowerCAmelCase =backtrack( __UpperCamelCase , __UpperCamelCase , current_number + 1 , __UpperCamelCase , __UpperCamelCase ) return current_sum, solutions_count def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: if not (1 <= needed_sum <= 1000 and 2 <= power <= 10): raise ValueError( """Invalid input\n""" """needed_sum must be between 1 and 1000, power between 2 and 10.""" ) return backtrack(__UpperCamelCase , __UpperCamelCase , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
341
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = JukeboxTokenizer lowerCamelCase = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCAmelCase ( self ) -> str: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCAmelCase ( self ) -> Any: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 
35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
341
1
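# Readability note: the test row above exercises JukeboxTokenizer through obfuscated
# identifiers. A minimal, de-obfuscated sketch of the same call pattern follows; it
# assumes network access to the "openai/jukebox-1b-lyrics" checkpoint used by the test
# and a short placeholder lyric, and is illustrative only, not part of the test suite.
from transformers import JukeboxTokenizer

metas = {
    "artist": "Zac Brown Band",
    "genres": "Country",
    "lyrics": "I met a traveller from an antique land",
}

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
# As in the test, the tokenizer returns one id tensor per prior level; the test above
# compares all three against hard-coded reference tensors.
input_ids = tokenizer(**metas)["input_ids"]
print([ids.shape for ids in input_ids])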
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =0 def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =Path(__UpperCAmelCase ) / """preprocessor_config.json""" _lowerCAmelCase =Path(__UpperCAmelCase ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__UpperCAmelCase , """w""" ) ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =Path(__UpperCAmelCase ) / """preprocessor_config.json""" _lowerCAmelCase =Path(__UpperCAmelCase ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__UpperCAmelCase , """w""" ) ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =CLIPConfig() # Create a dummy config file with image_proceesor_type _lowerCAmelCase =Path(__UpperCAmelCase ) / """preprocessor_config.json""" _lowerCAmelCase =Path(__UpperCAmelCase ) / """config.json""" json.dump( {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__UpperCAmelCase , """w""" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ).to_dict() config_dict.pop("""image_processor_type""" ) _lowerCAmelCase =CLIPImageProcessor(**__UpperCAmelCase ) # save in new folder model_config.save_pretrained(__UpperCAmelCase ) config.save_pretrained(__UpperCAmelCase ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ) # make sure private variable is not incorrectly saved _lowerCAmelCase =json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =Path(__UpperCAmelCase ) / """preprocessor_config.json""" json.dump( 
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__UpperCAmelCase , """w""" ) , ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Dict: with self.assertRaisesRegex( __UpperCAmelCase , """clip-base is not a local folder and is not a valid model identifier""" ): _lowerCAmelCase =AutoImageProcessor.from_pretrained("""clip-base""" ) def _lowerCAmelCase ( self ) -> Tuple: with self.assertRaisesRegex( __UpperCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase , revision="""aaaaaa""" ) def _lowerCAmelCase ( self ) -> List[str]: with self.assertRaisesRegex( __UpperCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): _lowerCAmelCase =AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" ) def _lowerCAmelCase ( self ) -> Optional[Any]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__UpperCAmelCase ): _lowerCAmelCase =AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__UpperCAmelCase ): _lowerCAmelCase =AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__UpperCAmelCase ) _lowerCAmelCase =AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__UpperCAmelCase ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase , trust_remote_code=__UpperCAmelCase ) self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" ) def _lowerCAmelCase ( self ) -> Dict: try: AutoConfig.register("""custom""" , __UpperCAmelCase ) AutoImageProcessor.register(__UpperCAmelCase , __UpperCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__UpperCAmelCase ): AutoImageProcessor.register(__UpperCAmelCase , __UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: _lowerCAmelCase =Path(__UpperCAmelCase ) / """preprocessor_config.json""" _lowerCAmelCase =Path(__UpperCAmelCase ) / """config.json""" json.dump( {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__UpperCAmelCase , """w""" ) , ) json.dump({"""model_type""": """clip"""} , open(__UpperCAmelCase , """w""" ) ) _lowerCAmelCase =CustomImageProcessor.from_pretrained(__UpperCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__UpperCAmelCase ) _lowerCAmelCase =AutoImageProcessor.from_pretrained(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def _lowerCAmelCase ( self ) -> Tuple: class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = True try: AutoConfig.register("""custom""" , __UpperCAmelCase ) AutoImageProcessor.register(__UpperCAmelCase , __UpperCAmelCase ) # If remote code is not set, the default is to use local _lowerCAmelCase =AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. _lowerCAmelCase =AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub _lowerCAmelCase =AutoImageProcessor.from_pretrained( """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__UpperCAmelCase ) self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" ) self.assertTrue(not hasattr(__UpperCAmelCase , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
341
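# Readability note: the AutoImageProcessor tests above are hard to follow through the
# obfuscation. The sketch below restates the register/save/reload round trip they
# exercise, with de-obfuscated names; CustomConfig and CustomImageProcessor stand in
# for the test_module fixtures and are assumptions of this sketch, not new API.
import tempfile

from transformers import AutoConfig, AutoImageProcessor, CLIPImageProcessor, PretrainedConfig


class CustomConfig(PretrainedConfig):
    model_type = "custom"


class CustomImageProcessor(CLIPImageProcessor):
    pass


# Register the pair once; afterwards the auto classes resolve "custom" like a built-in type.
AutoConfig.register("custom", CustomConfig)
AutoImageProcessor.register(CustomConfig, CustomImageProcessor)

with tempfile.TemporaryDirectory() as tmp_dir:
    CustomImageProcessor().save_pretrained(tmp_dir)
    CustomConfig().save_pretrained(tmp_dir)
    reloaded = AutoImageProcessor.from_pretrained(tmp_dir)
    assert isinstance(reloaded, CustomImageProcessor)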
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __A = {'vinai/bartpho-syllable': 1024} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =monolingual_vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCAmelCase ={} _lowerCAmelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =cnt cnt += 1 with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _lowerCAmelCase =line.strip().split()[0] _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None _lowerCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] _lowerCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: 
if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: return self.fairseq_ids_to_tokens[index] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
341
1
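# Readability note: a short sketch of the special-token layout implemented by the
# obfuscated build_inputs_with_special_tokens above. It assumes sentencepiece is
# installed and the public "vinai/bartpho-syllable" checkpoint referenced in this file
# is reachable; the ids 10, 11, 12 are arbitrary placeholders.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")

single = tokenizer.build_inputs_with_special_tokens([10, 11])        # <s> A </s>
pair = tokenizer.build_inputs_with_special_tokens([10, 11], [12])    # <s> A </s></s> B </s>
print(single, pair)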
"""simple docstring""" import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = XLMProphetNetTokenizer lowerCamelCase = False lowerCamelCase = True def _lowerCAmelCase ( self ) -> Union[str, Any]: super().setUp() # We have a SentencePiece fixture for testing _lowerCAmelCase =XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""[PAD]""" _lowerCAmelCase =0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """[PAD]""" ) self.assertEqual(vocab_keys[1] , """[CLS]""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(__UpperCAmelCase ) , 10_12 ) def _lowerCAmelCase ( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 10_12 ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) _lowerCAmelCase =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) _lowerCAmelCase =tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) _lowerCAmelCase =tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """[UNK]""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """[UNK]""", """.""", ] , ) @cached_property def _lowerCAmelCase ( self ) -> int: return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" ) @slow def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase ="""Hello World!""" _lowerCAmelCase =[3_53_89, 66_72, 49, 2] self.assertListEqual(__UpperCAmelCase , 
self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def _lowerCAmelCase ( self ) -> Any: # fmt: off _lowerCAmelCase ={"""input_ids""": [[1_10_73, 8_27_83, 18, 26, 8_27_83, 5_49, 5_15_40, 2_48, 1_72_09, 13_01, 2_17, 20, 21_51_86, 13_25, 1_47, 1_72_09, 13_01, 2_17, 20, 5_63_70, 53, 12_20_20, 20, 1_64_77, 27, 8_73_55, 45_48, 20, 47_28, 7_83_92, 17, 15_99_69, 18, 26, 2_44_91, 6_29, 15, 5_38, 2_27_04, 54_39, 15, 27_88, 2_44_91, 98_85, 15, 4_35_34, 6_05, 15, 8_14, 1_84_03, 3_32_00, 29, 15, 4_35_34, 2_44_58, 1_24_10, 1_11, 2_49_66, 8_36_69, 96_37, 14_40_68, 26, 8_50, 2_23_46, 27, 1_47, 2_49_66, 8_36_69, 8_34_90, 26, 3_91_13, 7_35, 27, 6_89, 6_56, 28_00, 13_39, 46_00, 53, 12_20_20, 11_57_85, 34, 8_16, 13_39, 4_68_87, 18, 1_47, 5_39_05, 19_51, 4_22_38, 4_11_70, 1_77_32, 8_34, 4_36, 15, 2_75_23, 9_87_33, 2_17, 1_47, 55_42, 49_81, 9_30, 1_73_47, 16, 2], [2_00_91, 6_29, 94, 8_27_86, 58, 4_90, 20, 15_28, 84, 5_39_05, 3_44, 8_05_92, 11_01_28, 1_88_22, 52_67, 13_06, 62, 15_25_37, 3_08, 79_97, 4_01, 12_44_27, 5_49, 3_54_42, 2_25, 1_09, 1_50_55, 2_57_48, 1_47, 71_19, 4_37_12, 34, 7_67, 13_53_66, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_92, 6_37_84, 11_94_66, 17, 14_78_08, 8_82_14, 18, 6_56, 81, 32, 32_96, 1_02_80, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
341
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =1 _lowerCAmelCase =3 _lowerCAmelCase =(32, 32) _lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) return CLIPTextModel(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , 
guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _lowerCAmelCase =unet.half() _lowerCAmelCase =text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase 
=sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowerCAmelCase ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , ) _lowerCAmelCase 
=torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
341
1
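# Readability note: the upscaler tests above are obfuscated. Below is a minimal,
# de-obfuscated sketch of the inference pattern exercised by the slow tests (fp16
# checkpoint, attention slicing, the low-resolution cat image). It assumes a CUDA
# device and access to the public "stabilityai/stable-diffusion-x4-upscaler" checkpoint.
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
)
pipe.to("cuda")
pipe.enable_attention_slicing()

low_res_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/sd2-upscale/low_res_cat.png"
)

generator = torch.manual_seed(0)
upscaled = pipe(
    prompt="a cat sitting on a park bench",
    image=low_res_image,
    generator=generator,
    output_type="np",
).images[0]
# The x4 upscaler returns a 512x512 image for this input, as asserted in the test above.
print(upscaled.shape)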
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase ) -> Optional[Any]: _lowerCAmelCase =str(id_ ) _lowerCAmelCase =None _lowerCAmelCase =None _lowerCAmelCase =[] _lowerCAmelCase ={} # {vertex:distance} def __lt__( self , __UpperCAmelCase ) -> Union[str, Any]: return self.key < other.key def __repr__( self ) -> Any: return self.id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: self.neighbors.append(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str: _lowerCAmelCase =weight def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , __UpperCamelCase ) graph[b - 1].add_edge(graph[a - 1] , __UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> list: _lowerCAmelCase =[] for u in graph: _lowerCAmelCase =math.inf _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =graph[:] while q: _lowerCAmelCase =min(__UpperCamelCase ) q.remove(__UpperCamelCase ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): _lowerCAmelCase =u _lowerCAmelCase =u.edges[v.id] for i in range(1 , len(__UpperCamelCase ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Iterator[tuple]: for u in graph: _lowerCAmelCase =math.inf _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =list(__UpperCamelCase ) hq.heapify(__UpperCamelCase ) while h: _lowerCAmelCase =hq.heappop(__UpperCamelCase ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): _lowerCAmelCase =u _lowerCAmelCase =u.edges[v.id] hq.heapify(__UpperCamelCase ) for i in range(1 , len(__UpperCamelCase ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def _lowerCamelCase() -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
341
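# Readability note: the row above is Prim's minimum-spanning-tree algorithm with all
# identifiers obfuscated (the connect / prim / prim_heap helpers collapse into one
# reused name). Below is a de-obfuscated, runnable restatement of the list-based
# variant plus a tiny usage example; the names Vertex, connect and prim are assumptions
# about the original identifiers, not new API.
from __future__ import annotations

import math


class Vertex:
    def __init__(self, id_: int) -> None:
        self.id = str(id_)
        self.key: float = math.inf       # best known connection cost to the growing tree
        self.pi: Vertex | None = None    # predecessor in the tree
        self.neighbors: list[Vertex] = []
        self.edges: dict[str, int] = {}  # neighbor id -> edge weight

    def __lt__(self, other: "Vertex") -> bool:
        return self.key < other.key


def connect(graph: list[Vertex], a: int, b: int, weight: int) -> None:
    # 1-indexed endpoints, as in the obfuscated helper above
    graph[a - 1].neighbors.append(graph[b - 1])
    graph[b - 1].neighbors.append(graph[a - 1])
    graph[a - 1].edges[graph[b - 1].id] = weight
    graph[b - 1].edges[graph[a - 1].id] = weight


def prim(graph: list[Vertex], root: Vertex) -> list[tuple[int, int]]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if v in q and u.edges[v.id] < v.key:
                v.pi = u
                v.key = u.edges[v.id]
    return [(int(v.id) + 1, int(v.pi.id) + 1) for v in graph[1:]]


# Triangle 1-2 (weight 1), 2-3 (weight 2), 1-3 (weight 3): the MST keeps the two cheapest edges.
g = [Vertex(n) for n in range(3)]
connect(g, 1, 2, 1)
connect(g, 2, 3, 2)
connect(g, 1, 3, 3)
assert prim(g, g[0]) == [(2, 1), (3, 2)]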
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''cvt''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =num_channels _lowerCAmelCase =patch_sizes _lowerCAmelCase =patch_stride _lowerCAmelCase =patch_padding _lowerCAmelCase =embed_dim _lowerCAmelCase =num_heads _lowerCAmelCase =depth _lowerCAmelCase =mlp_ratio _lowerCAmelCase =attention_drop_rate _lowerCAmelCase =drop_rate _lowerCAmelCase =drop_path_rate _lowerCAmelCase =qkv_bias _lowerCAmelCase =cls_token _lowerCAmelCase =qkv_projection_method _lowerCAmelCase =kernel_qkv _lowerCAmelCase =padding_kv _lowerCAmelCase =stride_kv _lowerCAmelCase =padding_q _lowerCAmelCase =stride_q _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps
341
1
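# Readability note: a short usage sketch for the CvT configuration defined above. It
# only relies on the public CvtConfig and CvtModel classes exported by transformers;
# the printed values echo the defaults in the obfuscated __init__ (the microsoft/cvt-13
# layout).
from transformers import CvtConfig, CvtModel

config = CvtConfig()
print(config.embed_dim)                 # [64, 192, 384]
print(config.num_heads, config.depth)   # [1, 3, 6] [1, 2, 10]

model = CvtModel(config)                # randomly initialised model with this architecture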
"""simple docstring""" import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =10 def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =[1, 2, 3, 4] _lowerCAmelCase =[1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _lowerCAmelCase =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _lowerCAmelCase =[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__UpperCAmelCase , self.block_size , 0 ) , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase ="""It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.""" _lowerCAmelCase , _lowerCAmelCase =process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase ="""""" _lowerCAmelCase , _lowerCAmelCase =process_story(__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [] ) self.assertEqual(__UpperCAmelCase , [] ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =( """It was the year of Our Lord one thousand seven hundred and """ """seventy-five\n\nSpiritual revelations were conceded to England """ """at that favoured period, as at this.\n@highlight\n\nIt was the best of times""" ) _lowerCAmelCase , _lowerCAmelCase =process_story(__UpperCAmelCase ) _lowerCAmelCase =[ """It was the year of Our Lord one thousand seven hundred and seventy-five.""", """Spiritual revelations were conceded to England at that favoured period, as at this.""", ] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =["""It was the best of times."""] self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =torch.tensor([1, 2, 3, 4] ) _lowerCAmelCase =torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 0 ).numpy() , expected.numpy() ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _lowerCAmelCase =torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 23 ).numpy() , expected.numpy() ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _lowerCAmelCase =torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCAmelCase , 1 ).numpy() , expected.numpy() ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =1_01 _lowerCAmelCase =torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] ) _lowerCAmelCase =torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _lowerCAmelCase =compute_token_type_ids(__UpperCAmelCase , __UpperCAmelCase ) np.testing.assert_array_equal(__UpperCAmelCase , __UpperCAmelCase )
341
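# Readability note: the tests above target helpers from the summarization example's
# utils_summarization module. The snippet below is an assumed re-implementation of the
# simplest helper, truncate_or_pad, written only to make the behaviour the tests expect
# explicit; it is not the original code.
def truncate_or_pad(sequence: list, block_size: int, pad_token_id: int) -> list:
    # Cut sequences longer than block_size, right-pad shorter ones with pad_token_id.
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))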
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''image_processor''', '''tokenizer'''] lowerCamelCase = '''CLIPImageProcessor''' lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) _lowerCAmelCase =kwargs.pop("""feature_extractor""" ) _lowerCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _lowerCAmelCase =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer.model_input_names _lowerCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
341
1
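# Readability note: the processor above pairs a CLIPImageProcessor with an XLM-RoBERTa
# tokenizer, which matches the AltCLIP processor layout. The sketch below assumes that
# identification and the availability of the public "BAAI/AltCLIP" checkpoint; it only
# illustrates the __call__ contract implemented above (text only, images only, or both).
import numpy as np
from PIL import Image
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))

both = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(both.keys()))   # token ids, attention mask and pixel_values together

text_only = processor(text=["a photo of a cat"], return_tensors="pt")   # tokenizer output only
image_only = processor(images=image, return_tensors="pt")               # image processor output only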
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=12 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=0 , __UpperCAmelCase=None , ) -> Dict: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =hidden_size _lowerCAmelCase =projection_dim _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =scope _lowerCAmelCase =bos_token_id def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: _lowerCAmelCase =input_mask.numpy() _lowerCAmelCase , _lowerCAmelCase =input_mask.shape _lowerCAmelCase =np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__UpperCAmelCase ): _lowerCAmelCase =1 _lowerCAmelCase =0 _lowerCAmelCase =self.get_config() return config, input_ids, tf.convert_to_tensor(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[int]: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: _lowerCAmelCase =TFBlipTextModel(config=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , training=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class 
lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFBlipTextModel,) if is_tf_available() else () lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =BlipTextModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def _lowerCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: pass def _lowerCAmelCase ( self ) -> Optional[Any]: pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def _lowerCAmelCase ( self ) -> Dict: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowerCAmelCase ( self ) -> int: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowerCAmelCase ( self ) -> int: pass @slow def _lowerCAmelCase ( self ) -> Any: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFBlipTextModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> List[Any]: super().test_pt_tf_model_equivalence(allow_missing_keys=__UpperCAmelCase )
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['PerceiverFeatureExtractor'] __A = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=__magic_name__ ): '''simple docstring''' lowerCamelCase = ['''flax''', '''transformers'''] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(self , ["""flax""", """transformers"""] ) @classmethod def _lowerCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> int: requires_backends(cls , ["""flax""", """transformers"""] ) @classmethod def _lowerCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["""flax""", """transformers"""] ) class lowerCamelCase__ ( metaclass=__magic_name__ ): '''simple docstring''' lowerCamelCase = ['''flax''', '''transformers'''] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(self , ["""flax""", """transformers"""] ) @classmethod def _lowerCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]: requires_backends(cls , ["""flax""", """transformers"""] ) @classmethod def _lowerCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any: requires_backends(cls , ["""flax""", """transformers"""] ) class lowerCamelCase__ ( metaclass=__magic_name__ ): '''simple docstring''' lowerCamelCase = ['''flax''', '''transformers'''] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(self , ["""flax""", """transformers"""] ) @classmethod def _lowerCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ["""flax""", """transformers"""] ) @classmethod def _lowerCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict: requires_backends(cls , ["""flax""", """transformers"""] ) class lowerCamelCase__ ( metaclass=__magic_name__ ): '''simple docstring''' lowerCamelCase = ['''flax''', '''transformers'''] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: requires_backends(self , ["""flax""", """transformers"""] ) @classmethod def _lowerCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]: requires_backends(cls , ["""flax""", """transformers"""] ) @classmethod def _lowerCAmelCase ( cls , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]: requires_backends(cls , ["""flax""", """transformers"""] )
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
"""simple docstring""" from math import isclose, sqrt def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> tuple[float, float, float]: _lowerCAmelCase =point_y / 4 / point_x _lowerCAmelCase =2 * normal_gradient / (1 + normal_gradient * normal_gradient) _lowerCAmelCase =(1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) _lowerCAmelCase =(sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 _lowerCAmelCase =outgoing_gradient**2 + 4 _lowerCAmelCase =2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) _lowerCAmelCase =(point_y - outgoing_gradient * point_x) ** 2 - 100 _lowerCAmelCase =( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) _lowerCAmelCase =( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point _lowerCAmelCase =x_minus if isclose(__UpperCamelCase , __UpperCamelCase ) else x_plus _lowerCAmelCase =point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def _lowerCamelCase(__UpperCamelCase = 1.4 , __UpperCamelCase = -9.6 ) -> int: _lowerCAmelCase =0 _lowerCAmelCase =first_x_coord _lowerCAmelCase =first_y_coord _lowerCAmelCase =(10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =next_point(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(F"""{solution() = }""")
341
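# Readability note: the next_point helper above reflects the beam's slope about the
# ellipse normal using s = 2m/(1+m^2) and c = (1-m^2)/(1+m^2), where m = y/(4x) is the
# normal gradient of 4x^2 + y^2 = 100 at (x, y). That form can be read as
# tan(2*theta - phi) with theta = atan(m) (normal angle) and phi = atan(g) (incoming
# angle). The check below verifies this identity numerically; it is an explanatory
# aside, not part of the original solution.
from math import atan, isclose, tan


def reflected_slope(m: float, g: float) -> float:
    # Same algebraic form as in next_point above.
    s = 2 * m / (1 + m * m)
    c = (1 - m * m) / (1 + m * m)
    return (s - c * g) / (c + s * g)


def reflected_slope_by_angles(m: float, g: float) -> float:
    # Reflect the incoming direction about the normal: outgoing angle = 2*theta - phi.
    return tan(2 * atan(m) - atan(g))


for m, g in [(0.5, -2.0), (1.7, 0.3), (-0.8, 1.2)]:
    assert isclose(reflected_slope(m, g), reflected_slope_by_angles(m, g))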
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
341
1
"""simple docstring""" import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =fname.split(os.path.sep )[-1] return re.search(R"""^(.*)_\d+\.jpg$""" , __UpperCamelCase ).groups()[0] class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Union[str, Any]: _lowerCAmelCase =file_names _lowerCAmelCase =image_transform _lowerCAmelCase =label_to_id def __len__( self ) -> Union[str, Any]: return len(self.file_names ) def __getitem__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =self.file_names[idx] _lowerCAmelCase =PIL.Image.open(__UpperCAmelCase ) _lowerCAmelCase =raw_image.convert("""RGB""" ) if self.image_transform is not None: _lowerCAmelCase =self.image_transform(__UpperCAmelCase ) _lowerCAmelCase =extract_label(__UpperCAmelCase ) if self.label_to_id is not None: _lowerCAmelCase =self.label_to_id[label] return {"image": image, "label": label} def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: # Initialize accelerator if args.with_tracking: _lowerCAmelCase =Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="""all""" , project_dir=args.project_dir ) else: _lowerCAmelCase =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase =config["""lr"""] _lowerCAmelCase =int(config["""num_epochs"""] ) _lowerCAmelCase =int(config["""seed"""] ) _lowerCAmelCase =int(config["""batch_size"""] ) _lowerCAmelCase =config["""image_size"""] if not isinstance(__UpperCamelCase , (list, tuple) ): _lowerCAmelCase =(image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , """isdigit""" ): if args.checkpointing_steps == "epoch": _lowerCAmelCase =args.checkpointing_steps elif args.checkpointing_steps.isdigit(): _lowerCAmelCase =int(args.checkpointing_steps ) else: raise ValueError( F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' ) else: _lowerCAmelCase =None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: _lowerCAmelCase =os.path.split(__UpperCamelCase )[-1].split(""".""" )[0] accelerator.init_trackers(__UpperCamelCase , __UpperCamelCase ) # Grab all the image filenames _lowerCAmelCase =[os.path.join(args.data_dir , __UpperCamelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith(""".jpg""" )] # Build the label correspondences _lowerCAmelCase =[extract_label(__UpperCamelCase ) for fname in file_names] _lowerCAmelCase =list(set(__UpperCamelCase ) ) id_to_label.sort() _lowerCAmelCase ={lbl: i for i, lbl in enumerate(__UpperCamelCase )} # Set the seed before splitting the data. 
np.random.seed(__UpperCamelCase ) torch.manual_seed(__UpperCamelCase ) torch.cuda.manual_seed_all(__UpperCamelCase ) # Split our filenames between train and validation _lowerCAmelCase =np.random.permutation(len(__UpperCamelCase ) ) _lowerCAmelCase =int(0.8 * len(__UpperCamelCase ) ) _lowerCAmelCase =random_perm[:cut] _lowerCAmelCase =random_perm[cut:] # For training we use a simple RandomResizedCrop _lowerCAmelCase =Compose([RandomResizedCrop(__UpperCamelCase , scale=(0.5, 1.0) ), ToTensor()] ) _lowerCAmelCase =PetsDataset( [file_names[i] for i in train_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase ) # For evaluation, we use a deterministic Resize _lowerCAmelCase =Compose([Resize(__UpperCamelCase ), ToTensor()] ) _lowerCAmelCase =PetsDataset([file_names[i] for i in eval_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase ) # Instantiate dataloaders. _lowerCAmelCase =DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 ) _lowerCAmelCase =DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase =create_model("""resnet50d""" , pretrained=__UpperCamelCase , num_classes=len(__UpperCamelCase ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCAmelCase =model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): _lowerCAmelCase =False for param in model.get_classifier().parameters(): _lowerCAmelCase =True # We normalize the batches of images to be a bit faster. _lowerCAmelCase =torch.tensor(model.default_cfg["""mean"""] )[None, :, None, None].to(accelerator.device ) _lowerCAmelCase =torch.tensor(model.default_cfg["""std"""] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer _lowerCAmelCase =torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler _lowerCAmelCase =OneCycleLR(optimizer=__UpperCamelCase , max_lr=__UpperCamelCase , epochs=__UpperCamelCase , steps_per_epoch=len(__UpperCamelCase ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =accelerator.prepare( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # We need to keep track of how many total steps we have iterated over _lowerCAmelCase =0 # We also need to keep track of the starting epoch so files are named properly _lowerCAmelCase =0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' ) accelerator.load_state(args.resume_from_checkpoint ) _lowerCAmelCase =os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint _lowerCAmelCase =[f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) _lowerCAmelCase =dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` _lowerCAmelCase =os.path.splitext(__UpperCamelCase )[0] if "epoch" in training_difference: _lowerCAmelCase =int(training_difference.replace("""epoch_""" , """""" ) ) + 1 _lowerCAmelCase =None else: _lowerCAmelCase =int(training_difference.replace("""step_""" , """""" ) ) _lowerCAmelCase =resume_step // len(__UpperCamelCase ) resume_step -= starting_epoch * len(__UpperCamelCase ) # Now we train the model for epoch in range(__UpperCamelCase , __UpperCamelCase ): model.train() if args.with_tracking: _lowerCAmelCase =0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step _lowerCAmelCase =accelerator.skip_first_batches(__UpperCamelCase , __UpperCamelCase ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader _lowerCAmelCase =train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. _lowerCAmelCase ={k: v.to(accelerator.device ) for k, v in batch.items()} _lowerCAmelCase =(batch["""image"""] - mean) / std _lowerCAmelCase =model(__UpperCamelCase ) _lowerCAmelCase =torch.nn.functional.cross_entropy(__UpperCamelCase , batch["""label"""] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(__UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(__UpperCamelCase , __UpperCamelCase ): _lowerCAmelCase =F'''step_{overall_step}''' if overall_step % checkpointing_steps == 0: if args.output_dir is not None: _lowerCAmelCase =os.path.join(args.output_dir , __UpperCamelCase ) accelerator.save_state(__UpperCamelCase ) model.eval() _lowerCAmelCase =0 _lowerCAmelCase =0 for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
_lowerCAmelCase ={k: v.to(accelerator.device ) for k, v in batch.items()} _lowerCAmelCase =(batch["""image"""] - mean) / std with torch.no_grad(): _lowerCAmelCase =model(__UpperCamelCase ) _lowerCAmelCase =outputs.argmax(dim=-1 ) _lowerCAmelCase , _lowerCAmelCase =accelerator.gather_for_metrics((predictions, batch["""label"""]) ) _lowerCAmelCase =predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() _lowerCAmelCase =accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' ) if args.with_tracking: accelerator.log( { """accuracy""": 100 * eval_metric, """train_loss""": total_loss.item() / len(__UpperCamelCase ), """epoch""": epoch, } , step=__UpperCamelCase , ) if checkpointing_steps == "epoch": _lowerCAmelCase =F'''epoch_{epoch}''' if args.output_dir is not None: _lowerCAmelCase =os.path.join(args.output_dir , __UpperCamelCase ) accelerator.save_state(__UpperCamelCase ) if args.with_tracking: accelerator.end_training() def _lowerCamelCase() -> Optional[Any]: _lowerCAmelCase =argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument("""--data_dir""" , required=__UpperCamelCase , help="""The data folder on disk.""" ) parser.add_argument("""--fp16""" , action="""store_true""" , help="""If passed, will use FP16 training.""" ) parser.add_argument( """--mixed_precision""" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) parser.add_argument( """--checkpointing_steps""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.""" , ) parser.add_argument( """--output_dir""" , type=__UpperCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=__UpperCamelCase , default=__UpperCamelCase , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--with_tracking""" , action="""store_true""" , help="""Whether to load in all available experiment trackers from the environment and use them for logging.""" , ) parser.add_argument( """--project_dir""" , type=__UpperCamelCase , default="""logs""" , help="""Location on where to store experiment tracking logs` and relevent project information""" , ) _lowerCAmelCase =parser.parse_args() _lowerCAmelCase ={"""lr""": 3E-2, """num_epochs""": 3, """seed""": 42, """batch_size""": 64, """image_size""": 224} training_function(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": main()
341
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
341
1
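A small property check (a sketch, assuming the function names restored above): the O(n^2) brute-force counter and the merge-sort counter should agree on arbitrary random inputs, not just the three hand-picked cases in main().

import random

# compare the two counters on random arrays of varying length
for _ in range(100):
    arr = [random.randint(-50, 50) for _ in range(random.randint(0, 40))]
    _, fast_count = count_inversions_recursive(arr)
    assert fast_count == count_inversions_bf(arr)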
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True lowerCamelCase = None lowerCamelCase = 1 lowerCamelCase = None lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
341
1
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[str]: _lowerCAmelCase =0 if start < end: _lowerCAmelCase =randint(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =a[end] _lowerCAmelCase =a[pivot] _lowerCAmelCase =temp _lowerCAmelCase , _lowerCAmelCase =_in_place_partition(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) count += _in_place_quick_sort(__UpperCamelCase , __UpperCamelCase , p - 1 ) count += _in_place_quick_sort(__UpperCamelCase , p + 1 , __UpperCamelCase ) return count def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple: _lowerCAmelCase =0 _lowerCAmelCase =randint(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =a[end] _lowerCAmelCase =a[pivot] _lowerCAmelCase =temp _lowerCAmelCase =start - 1 for index in range(__UpperCamelCase , __UpperCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _lowerCAmelCase =new_pivot_index + 1 _lowerCAmelCase =a[new_pivot_index] _lowerCAmelCase =a[index] _lowerCAmelCase =temp _lowerCAmelCase =a[new_pivot_index + 1] _lowerCAmelCase =a[end] _lowerCAmelCase =temp return new_pivot_index + 1, count __A = TemporaryFile() __A = 100 # 1000 elements are to be sorted __A , __A = 0, 1 # mean and standard deviation __A = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array __A = np.load(outfile) __A = len(M) - 1 __A = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal distribution' 'is :' ) print(z)
341
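A minimal usage sketch for the counting quicksort above (names as restored): sort a small list in place and report how many comparisons the partition loop made.

data = [9, 4, 7, 1, 3, 8, 2]
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
print(data)         # [1, 2, 3, 4, 7, 8, 9]
print(comparisons)  # varies with the random pivots, O(n log n) on average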
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowerCamelCase() -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
341
1
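Two-input gates compose; a small sketch (the multi_or helper is hypothetical, built only from the or_gate above) that folds the gate over a list of bits to get a multi-input OR.

from functools import reduce


def multi_or(bits):
    # reduce applies or_gate pairwise: or_gate(or_gate(b0, b1), b2), ...
    return reduce(or_gate, bits, 0)


assert multi_or([0, 0, 0]) == 0
assert multi_or([0, 1, 0]) == 1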
"""simple docstring""" class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any: _lowerCAmelCase =name _lowerCAmelCase =value _lowerCAmelCase =weight def __repr__( self ) -> List[Any]: return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def _lowerCAmelCase ( self ) -> List[str]: return self.value def _lowerCAmelCase ( self ) -> Dict: return self.name def _lowerCAmelCase ( self ) -> str: return self.weight def _lowerCAmelCase ( self ) -> List[str]: return self.value / self.weight def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: _lowerCAmelCase =[] for i in range(len(__UpperCamelCase ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Tuple: _lowerCAmelCase =sorted(__UpperCamelCase , key=__UpperCamelCase , reverse=__UpperCamelCase ) _lowerCAmelCase =[] _lowerCAmelCase , _lowerCAmelCase =0.0, 0.0 for i in range(len(__UpperCamelCase ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def _lowerCamelCase() -> List[Any]: pass if __name__ == "__main__": import doctest doctest.testmod()
341
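A usage sketch for the greedy selector above (names as restored; the menu values are made up for illustration): rank items by value-to-weight ratio and take them while they fit in the weight budget.

food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 60, 40, 70]

menu = build_menu(food, value, weight)
chosen, total_value = greedy(menu, 60, Things.value_weight)
print(chosen)       # the items whose combined weight fits in 60
print(total_value)  # total value collected by the greedy choice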
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
341
1
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> list: if len(__UpperCamelCase ) == 0: return [] _lowerCAmelCase , _lowerCAmelCase =min(__UpperCamelCase ), max(__UpperCamelCase ) _lowerCAmelCase =int(max_value - min_value ) + 1 _lowerCAmelCase =[[] for _ in range(__UpperCamelCase )] for i in my_list: buckets[int(i - min_value )].append(__UpperCamelCase ) return [v for bucket in buckets for v in sorted(__UpperCamelCase )] if __name__ == "__main__": from doctest import testmod testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
341
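Because each value is bucketed by its offset from the minimum and buckets are sorted individually, the routine also handles negative and floating-point inputs; a short check (a sketch, using the bucket_sort restored above) against Python's built-in sorted.

import random

data = [random.uniform(-100, 100) for _ in range(1_000)]
assert bucket_sort(data) == sorted(data)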
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
341
1
"""simple docstring""" import string def _lowerCamelCase(__UpperCamelCase ) -> None: for key in range(len(string.ascii_uppercase ) ): _lowerCAmelCase ="""""" for symbol in message: if symbol in string.ascii_uppercase: _lowerCAmelCase =string.ascii_uppercase.find(__UpperCamelCase ) _lowerCAmelCase =num - key if num < 0: _lowerCAmelCase =num + len(string.ascii_uppercase ) _lowerCAmelCase =translated + string.ascii_uppercase[num] else: _lowerCAmelCase =translated + symbol print(F'''Decryption using Key #{key}: {translated}''' ) def _lowerCamelCase() -> None: _lowerCAmelCase =input("""Encrypted message: """ ) _lowerCAmelCase =message.upper() decrypt(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
341
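To watch the brute-force decryption work without interactive input, a sketch with a small hypothetical encrypt helper (not part of the file above) that shifts letters forward, then runs decrypt on its output; the line printed for key #7 reproduces the original text.

import string


def encrypt(message: str, key: int) -> str:
    # hypothetical helper: shift each uppercase letter forward by `key`
    shifted = ""
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            num = (string.ascii_uppercase.find(symbol) + key) % 26
            shifted += string.ascii_uppercase[num]
        else:
            shifted += symbol
    return shifted


decrypt(encrypt("ATTACK AT DAWN", 7))  # the key #7 line shows the original message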
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
"""simple docstring""" import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( """compression_format, is_archive""" , [ ("""7z""", True), ("""bz2""", False), ("""gzip""", False), ("""lz4""", False), ("""tar""", True), ("""xz""", False), ("""zip""", True), ("""zstd""", False), ] , ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Optional[int]: _lowerCAmelCase ={ """7z""": (seven_zip_file, SevenZipExtractor), """bz2""": (bza_file, BzipaExtractor), """gzip""": (gz_file, GzipExtractor), """lz4""": (lza_file, LzaExtractor), """tar""": (tar_file, TarExtractor), """xz""": (xz_file, XzExtractor), """zip""": (zip_file, ZipExtractor), """zstd""": (zstd_file, ZstdExtractor), } _lowerCAmelCase , _lowerCAmelCase =input_paths_and_base_extractors[compression_format] if input_path is None: _lowerCAmelCase =F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__UpperCamelCase ) assert base_extractor.is_extractable(__UpperCamelCase ) _lowerCAmelCase =tmp_path / ("""extracted""" if is_archive else """extracted.txt""") base_extractor.extract(__UpperCamelCase , __UpperCamelCase ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name _lowerCAmelCase =file_path.read_text(encoding="""utf-8""" ) else: _lowerCAmelCase =output_path.read_text(encoding="""utf-8""" ) _lowerCAmelCase =text_file.read_text(encoding="""utf-8""" ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( """compression_format, is_archive""" , [ ("""7z""", True), ("""bz2""", False), ("""gzip""", False), ("""lz4""", False), ("""tar""", True), ("""xz""", False), ("""zip""", True), ("""zstd""", False), ] , ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Any: _lowerCAmelCase ={ """7z""": seven_zip_file, """bz2""": bza_file, """gzip""": gz_file, """lz4""": lza_file, """tar""": tar_file, """xz""": xz_file, """zip""": zip_file, """zstd""": zstd_file, } _lowerCAmelCase =input_paths[compression_format] if input_path is None: _lowerCAmelCase =F'''for \'{compression_format}\' compression_format, ''' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(__UpperCamelCase ) _lowerCAmelCase =Extractor.infer_extractor_format(__UpperCamelCase ) assert extractor_format is not None _lowerCAmelCase =tmp_path / ("""extracted""" if is_archive else """extracted.txt""") Extractor.extract(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if is_archive: assert output_path.is_dir() for file_path in 
output_path.iterdir(): assert file_path.name == text_file.name _lowerCAmelCase =file_path.read_text(encoding="""utf-8""" ) else: _lowerCAmelCase =output_path.read_text(encoding="""utf-8""" ) _lowerCAmelCase =text_file.read_text(encoding="""utf-8""" ) assert extracted_file_content == expected_file_content @pytest.fixture def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> List[str]: import tarfile _lowerCAmelCase =tmp_path / """data_dot_dot""" directory.mkdir() _lowerCAmelCase =directory / """tar_file_with_dot_dot.tar""" with tarfile.TarFile(__UpperCamelCase , """w""" ) as f: f.add(__UpperCamelCase , arcname=os.path.join("""..""" , text_file.name ) ) return path @pytest.fixture def _lowerCamelCase(__UpperCamelCase ) -> List[str]: import tarfile _lowerCAmelCase =tmp_path / """data_sym_link""" directory.mkdir() _lowerCAmelCase =directory / """tar_file_with_sym_link.tar""" os.symlink("""..""" , directory / """subdir""" , target_is_directory=__UpperCamelCase ) with tarfile.TarFile(__UpperCamelCase , """w""" ) as f: f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( """insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict: _lowerCAmelCase ={ """tar_file_with_dot_dot""": tar_file_with_dot_dot, """tar_file_with_sym_link""": tar_file_with_sym_link, } _lowerCAmelCase =insecure_tar_files[insecure_tar_file] _lowerCAmelCase =tmp_path / """extracted""" TarExtractor.extract(__UpperCamelCase , __UpperCamelCase ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def _lowerCamelCase(__UpperCamelCase ) -> Any: # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number _lowerCAmelCase =tmpdir / """not_a_zip_file""" # From: https://github.com/python/cpython/pull/5053 _lowerCAmelCase =( b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00""" b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I""" b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07""" b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82""" ) with not_a_zip_file.open("""wb""" ) as f: f.write(__UpperCamelCase ) assert zipfile.is_zipfile(str(__UpperCamelCase ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(__UpperCamelCase ) # but we're right
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
341
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
341
1
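A note on the evaluate() helper above: the reported conll_score is simply the mean of the MUC, B-cubed and CEAFe F1 values rescaled to a percentage. A minimal pure-Python sketch with assumed example F1 values (not real metric outputs):

muc_f1, bcub_f1, ceafe_f1 = 0.82, 0.79, 0.76  # illustrative F1 values, not real outputs
conll_score = (muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100  # same averaging as in evaluate()
print(f"CoNLL score: {conll_score:.2f}")  # CoNLL score: 79.00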
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
341
1
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> str: if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(__UpperCamelCase , n - 1 , __UpperCamelCase ) * a) % mod else: _lowerCAmelCase =binary_exponentiation(__UpperCamelCase , n / 2 , __UpperCamelCase ) return (b * b) % mod # a prime number __A = 701 __A = 10_0000_0000 __A = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
341
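The module-level demo in the snippet above collapses its distinct variable names into repeated __A assignments, so it no longer runs as written. A minimal self-contained sketch of the same modular-inverse check via Fermat's little theorem (the names binary_exponentiation, a, b and p are illustrative, not taken from the snippet):

def binary_exponentiation(a: int, n: int, mod: int) -> int:
    # Computes (a ** n) % mod with O(log n) multiplications.
    if n == 0:
        return 1
    if n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    half = binary_exponentiation(a, n // 2, mod)
    return (half * half) % mod


p = 701              # a prime modulus
a = 1_000_000_000
b = 10
# Modular division: a / b (mod p) equals a * b**(p - 2) (mod p) by Fermat's little theorem.
print((a * binary_exponentiation(b, p - 2, p)) % p == (a * pow(b, p - 2, p)) % p)  # True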
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model'} __A = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __A = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __A = 0 __A = 1 __A = 2 __A = 3 __A = 4 class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = '''left''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =3 _lowerCAmelCase =do_lower_case _lowerCAmelCase =remove_space _lowerCAmelCase =keep_accents _lowerCAmelCase =vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> str: return len(self.sp_model ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None return state def __setstate__( self , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: if self.remove_space: _lowerCAmelCase =""" """.join(inputs.strip().split() ) else: _lowerCAmelCase =inputs _lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase ) _lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase =outputs.lower() return outputs def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: _lowerCAmelCase 
=self.preprocess_text(__UpperCAmelCase ) _lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) _lowerCAmelCase =[] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase =cur_pieces[1:] else: _lowerCAmelCase =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: return self.sp_model.PieceToId(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.IdToPiece(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str: _lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase ) _lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _lowerCAmelCase =[] _lowerCAmelCase =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) _lowerCAmelCase =[] sub_texts.append(__UpperCAmelCase ) else: current_sub_text.append(__UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _lowerCAmelCase ="""""".join(__UpperCAmelCase ) _lowerCAmelCase =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase ) return clean_text else: return text def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] return ([0] * len(__UpperCAmelCase )) + [1, 1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = 
None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
341
1
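A short usage sketch for the SentencePiece tokenizer above, assuming it is exposed under its upstream transformers name XLNetTokenizer (the pretrained spiece.model is downloaded on first use):

from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
encoded = tokenizer("Hello, world!")  # <sep> and <cls> are appended at the end; the padding side is "left"
print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))
print(tokenizer.decode(encoded["input_ids"], skip_special_tokens=True))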
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase = 1000 ) -> int: _lowerCAmelCase , _lowerCAmelCase =1, 1 _lowerCAmelCase =[] for i in range(1 , n + 1 ): _lowerCAmelCase =prev_numerator + 2 * prev_denominator _lowerCAmelCase =prev_numerator + prev_denominator if len(str(__UpperCamelCase ) ) > len(str(__UpperCamelCase ) ): result.append(__UpperCamelCase ) _lowerCAmelCase =numerator _lowerCAmelCase =denominator return len(__UpperCamelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
341
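The loop above is the standard recurrence for the convergents of sqrt(2) (Project Euler 57): each new fraction is (num + 2*den) / (num + den), starting from 1/1. A readable sketch of the same computation (the name count_long_numerators is illustrative):

def count_long_numerators(n: int = 1000) -> int:
    # Count the expansions of sqrt(2) whose numerator has more digits than the denominator.
    numerator, denominator, count = 1, 1, 0
    for _ in range(n):
        numerator, denominator = numerator + 2 * denominator, numerator + denominator
        if len(str(numerator)) > len(str(denominator)):
            count += 1
    return count


print(count_long_numerators())  # 153 for the first 1000 expansions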
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
341
1
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __A = TypeVar('KEY') __A = TypeVar('VAL') @dataclass(frozen=__magic_name__ , slots=__magic_name__ ) class lowerCamelCase__ ( Generic[KEY, VAL] ): '''simple docstring''' lowerCamelCase = 42 lowerCamelCase = 42 class lowerCamelCase__ ( _Item ): '''simple docstring''' def __init__( self ) -> None: super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __bool__( self ) -> bool: return False __A = _DeletedItem() class lowerCamelCase__ ( MutableMapping[KEY, VAL] ): '''simple docstring''' def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ) -> None: _lowerCAmelCase =initial_block_size _lowerCAmelCase =[None] * initial_block_size assert 0.0 < capacity_factor < 1.0 _lowerCAmelCase =capacity_factor _lowerCAmelCase =0 def _lowerCAmelCase ( self , __UpperCAmelCase ) -> int: return hash(__UpperCAmelCase ) % len(self._buckets ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> int: return (ind + 1) % len(self._buckets ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> bool: _lowerCAmelCase =self._buckets[ind] if not stored: _lowerCAmelCase =_Item(__UpperCAmelCase , __UpperCAmelCase ) self._len += 1 return True elif stored.key == key: _lowerCAmelCase =_Item(__UpperCAmelCase , __UpperCAmelCase ) return True else: return False def _lowerCAmelCase ( self ) -> bool: _lowerCAmelCase =len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> bool: if len(self._buckets ) <= self._initial_block_size: return False _lowerCAmelCase =len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def _lowerCAmelCase ( self , __UpperCAmelCase ) -> None: _lowerCAmelCase =self._buckets _lowerCAmelCase =[None] * new_size _lowerCAmelCase =0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def _lowerCAmelCase ( self ) -> None: self._resize(len(self._buckets ) * 2 ) def _lowerCAmelCase ( self ) -> None: self._resize(len(self._buckets ) // 2 ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Iterator[int]: _lowerCAmelCase =self._get_bucket_index(__UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind _lowerCAmelCase =self._get_next_ind(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: for ind in self._iterate_buckets(__UpperCAmelCase ): if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): break def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ) -> None: if self._is_full(): self._size_up() self._add_item(__UpperCAmelCase , __UpperCAmelCase ) def __delitem__( self , __UpperCAmelCase ) -> None: for ind in self._iterate_buckets(__UpperCAmelCase ): _lowerCAmelCase =self._buckets[ind] if item is None: raise KeyError(__UpperCAmelCase ) if item is _deleted: continue if item.key == key: _lowerCAmelCase =_deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , __UpperCAmelCase ) -> VAL: for ind in self._iterate_buckets(__UpperCAmelCase ): _lowerCAmelCase =self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCAmelCase ) def __len__( self ) -> int: return self._len def __iter__( self ) -> Iterator[KEY]: yield from (item.key for item in self._buckets if item) def __repr__( self ) -> str: _lowerCAmelCase =""" ,""".join( f'''{item.key}: 
{item.val}''' for item in self._buckets if item ) return f'''HashMap({val_string})'''
341
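A quick usage sketch for the open-addressing map above. HashMap stands in for the obfuscated class name and is an assumption, not a name defined in the snippet:

hash_map = HashMap()                  # assumed alias for the class above; default block size 8, capacity factor 0.75
for key in range(20):
    hash_map[key] = key * key         # inserts trigger _size_up() once the load factor is exceeded
del hash_map[3]                       # the slot is marked with the _deleted sentinel, not cleared
print(len(hash_map), hash_map[10])    # 19 100
print(sorted(hash_map))               # iterating yields the stored keys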
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''llama''' lowerCamelCase = ['''past_key_values'''] def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =intermediate_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads # for backward compatibility if num_key_value_heads is None: _lowerCAmelCase =num_attention_heads _lowerCAmelCase =num_key_value_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =rms_norm_eps _lowerCAmelCase =pretraining_tp _lowerCAmelCase =use_cache _lowerCAmelCase =rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> str: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
341
1
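A hedged sketch of the rope_scaling validation implemented above, using the upstream class this configuration mirrors (LlamaConfig, as in transformers releases around 4.31; newer releases validate the field differently). The values are illustrative:

from transformers import LlamaConfig

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted: known type, float factor > 1
print(config.rope_scaling)

try:
    LlamaConfig(rope_scaling={"type": "linear", "factor": 1})  # rejected by the check above: factor must be a float > 1
except ValueError as err:
    print(err)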
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __A = logging.get_logger(__name__) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def __init__( self , **__UpperCAmelCase ) -> List[str]: requires_backends(self , ["""bs4"""] ) super().__init__(**__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase =[] _lowerCAmelCase =[] _lowerCAmelCase =element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag _lowerCAmelCase =parent.find_all(child.name , recursive=__UpperCAmelCase ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(__UpperCAmelCase ) else next(i for i, s in enumerate(__UpperCAmelCase , 1 ) if s is child ) ) _lowerCAmelCase =parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict: _lowerCAmelCase =BeautifulSoup(__UpperCAmelCase , """html.parser""" ) _lowerCAmelCase =[] _lowerCAmelCase =[] _lowerCAmelCase =[] for element in html_code.descendants: if type(__UpperCAmelCase ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue _lowerCAmelCase =html.unescape(__UpperCAmelCase ).strip() if not text_in_this_tag: continue all_doc_strings.append(__UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase =self.xpath_soup(__UpperCAmelCase ) stringaxtag_seq.append(__UpperCAmelCase ) stringaxsubs_seq.append(__UpperCAmelCase ) if len(__UpperCAmelCase ) != len(__UpperCAmelCase ): raise ValueError("""Number of doc strings and xtags does not correspond""" ) if len(__UpperCAmelCase ) != len(__UpperCAmelCase ): raise ValueError("""Number of doc strings and xsubs does not correspond""" ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase ="""""" for tagname, subs in zip(__UpperCAmelCase , __UpperCAmelCase ): xpath += f'''/{tagname}''' if subs != 0: xpath += f'''[{subs}]''' return xpath def __call__( self , __UpperCAmelCase ) -> BatchFeature: _lowerCAmelCase =False # Check that strings has a valid type if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _lowerCAmelCase =True elif isinstance(__UpperCAmelCase , (list, tuple) ): if len(__UpperCAmelCase ) == 0 or isinstance(html_strings[0] , __UpperCAmelCase ): _lowerCAmelCase =True if not valid_strings: raise ValueError( """HTML strings must of type `str`, `List[str]` (batch of examples), """ f'''but is of type {type(__UpperCAmelCase )}.''' ) _lowerCAmelCase =bool(isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(html_strings[0] , __UpperCAmelCase )) ) if not is_batched: _lowerCAmelCase =[html_strings] # Get nodes + xpaths _lowerCAmelCase =[] _lowerCAmelCase =[] for html_string in html_strings: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self.get_three_from_single(__UpperCAmelCase ) nodes.append(__UpperCAmelCase ) _lowerCAmelCase =[] for node, tag_list, sub_list in zip(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowerCAmelCase =self.construct_xpath(__UpperCAmelCase , __UpperCAmelCase ) xpath_strings.append(__UpperCAmelCase ) xpaths.append(__UpperCAmelCase ) # return as Dict _lowerCAmelCase ={"""nodes""": nodes, """xpaths""": xpaths} _lowerCAmelCase =BatchFeature(data=__UpperCAmelCase , 
tensor_type=__UpperCAmelCase ) return encoded_inputs
341
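A usage sketch for the HTML feature extractor above, via its upstream transformers name MarkupLMFeatureExtractor (requires beautifulsoup4); the example HTML and the outputs shown in comments are illustrative:

from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
html = "<html><body><h1>Title</h1><p>Hello <b>world</b></p></body></html>"
encoding = feature_extractor(html)
print(encoding["nodes"])   # e.g. [['Title', 'Hello', 'world']]
print(encoding["xpaths"])  # e.g. [['/html/body/h1', '/html/body/p', '/html/body/p/b']]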
"""simple docstring""" import warnings from .generation import TFGenerationMixin class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' # warning at import time warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
341
1
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> str: if not all(char in """01""" for char in bin_string ): raise ValueError("""Non-binary value was passed to the function""" ) if not bin_string: raise ValueError("""Empty string was passed to the function""" ) _lowerCAmelCase ="""""" while len(__UpperCamelCase ) % 3 != 0: _lowerCAmelCase ="""0""" + bin_string _lowerCAmelCase =[ bin_string[index : index + 3] for index in range(len(__UpperCamelCase ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: _lowerCAmelCase =0 for index, val in enumerate(__UpperCamelCase ): oct_val += int(2 ** (2 - index) * int(__UpperCamelCase ) ) oct_string += str(__UpperCamelCase ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
341
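A readable sketch of the same binary-to-octal conversion (the name bin_to_octal is illustrative), cross-checked against Python's built-in conversion:

def bin_to_octal(bin_string: str) -> str:
    if not bin_string or set(bin_string) - {"0", "1"}:
        raise ValueError("expected a non-empty binary string")
    # Left-pad to a multiple of 3 bits, then map each 3-bit group to one octal digit.
    padded = bin_string.zfill((len(bin_string) + 2) // 3 * 3)
    return "".join(str(int(padded[i : i + 3], 2)) for i in range(0, len(padded), 3))


print(bin_to_octal("1111"))        # 17
print(oct(int("1111", 2))[2:])     # 17, the built-in reference value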
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =decoder_seq_length # For common tests _lowerCAmelCase =self.decoder_seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_attention_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =d_model _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =eos_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =decoder_start_token_id _lowerCAmelCase =use_cache _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =None _lowerCAmelCase =decoder_seq_length _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_attention_mask: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: _lowerCAmelCase =True _lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _lowerCAmelCase =outputs["""past_key_values"""] # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # 
append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase = True lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Any: pass def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def _lowerCAmelCase ( self ) -> str: pass
341
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json', 'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json', 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json', 'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json', 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json', 'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json', 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json', 'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json', } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''funnel''' lowerCamelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', } def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=[4, 4, 4] , __UpperCAmelCase=None , __UpperCAmelCase=2 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=64 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu_new" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=None , __UpperCAmelCase=1e-9 , __UpperCAmelCase="mean" , __UpperCAmelCase="relative_shift" , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , **__UpperCAmelCase , ) -> Any: _lowerCAmelCase =vocab_size _lowerCAmelCase =block_sizes _lowerCAmelCase =[1] * len(__UpperCAmelCase ) if block_repeats is None else block_repeats assert len(__UpperCAmelCase ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." _lowerCAmelCase =num_decoder_layers _lowerCAmelCase =d_model _lowerCAmelCase =n_head _lowerCAmelCase =d_head _lowerCAmelCase =d_inner _lowerCAmelCase =hidden_act _lowerCAmelCase =hidden_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =activation_dropout _lowerCAmelCase =initializer_range _lowerCAmelCase =initializer_std _lowerCAmelCase =layer_norm_eps assert pooling_type in [ "mean", "max", ], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.''' _lowerCAmelCase =pooling_type assert attention_type in [ "relative_shift", "factorized", ], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.''' _lowerCAmelCase =attention_type _lowerCAmelCase =separate_cls _lowerCAmelCase =truncate_seq _lowerCAmelCase =pool_q_only super().__init__(**__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> Any: return sum(self.block_sizes ) @num_hidden_layers.setter def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: raise NotImplementedError( """This model does not support the setting of `num_hidden_layers`. 
Please set `block_sizes`.""" ) @property def _lowerCAmelCase ( self ) -> int: return len(self.block_sizes ) @num_blocks.setter def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
341
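A small sketch of the block_sizes bookkeeping above, using the upstream FunnelConfig class from transformers that this configuration mirrors; the values are illustrative:

from transformers import FunnelConfig

config = FunnelConfig(block_sizes=[4, 4, 4])
print(config.num_hidden_layers)  # 12, i.e. sum(block_sizes); the property is read-only
print(config.num_blocks)         # 3, i.e. len(block_sizes)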
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = JukeboxTokenizer lowerCamelCase = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCAmelCase ( self ) -> str: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCAmelCase ( self ) -> Any: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 
35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
341
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''convbert''' def __init__( self , __UpperCAmelCase=3_05_22 , __UpperCAmelCase=7_68 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=30_72 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=7_68 , __UpperCAmelCase=2 , __UpperCAmelCase=9 , __UpperCAmelCase=1 , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[int]: super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_size _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_act _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =type_vocab_size _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps _lowerCAmelCase =embedding_size _lowerCAmelCase =head_ratio _lowerCAmelCase =conv_kernel_size _lowerCAmelCase =num_groups _lowerCAmelCase =classifier_dropout class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' @property def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _lowerCAmelCase ={0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCAmelCase ={0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
341
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __A = {'vinai/bartpho-syllable': 1024} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =monolingual_vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCAmelCase ={} _lowerCAmelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =cnt cnt += 1 with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _lowerCAmelCase =line.strip().split()[0] _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None _lowerCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] _lowerCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: 
if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: return self.fairseq_ids_to_tokens[index] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
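# Usage sketch, separate from the tokenizer module above. It assumes `transformers`
# and `sentencepiece` are installed and that the "vinai/bartpho-syllable" checkpoint
# referenced in the pretrained maps above is reachable; the sentence is only an example.
from transformers import AutoTokenizer

bartpho_tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")

encoding = bartpho_tokenizer("Chúng tôi là những nghiên cứu viên.")
print(encoding["input_ids"])  # ids wrapped in the <s> ... </s> special tokens added above
print(bartpho_tokenizer.convert_ids_to_tokens(encoding["input_ids"]))  # SentencePiece pieces prefixed with "▁"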
341
1
"""simple docstring""" from __future__ import annotations __A = list[list[int]] # assigning initial values to the grid __A = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __A = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def _lowerCamelCase(__UpperCamelCase ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def _lowerCamelCase(__UpperCamelCase ) -> Matrix | None: if location := find_empty_location(__UpperCamelCase ): _lowerCAmelCase , _lowerCAmelCase =location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): _lowerCAmelCase =digit if sudoku(__UpperCamelCase ) is not None: return grid _lowerCAmelCase =0 return None def _lowerCamelCase(__UpperCamelCase ) -> None: for row in grid: for cell in row: print(__UpperCamelCase , end=""" """ ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('\nExample grid:\n' + '=' * 20) print_solution(example_grid) print('\nExample grid solution:') __A = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('Cannot find a solution.')
341
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =1 _lowerCAmelCase =3 _lowerCAmelCase =(32, 32) _lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) return CLIPTextModel(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , 
guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _lowerCAmelCase =unet.half() _lowerCAmelCase =text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase 
=sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowerCAmelCase ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , ) _lowerCAmelCase 
=torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
341
1
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
341
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''cvt''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =num_channels _lowerCAmelCase =patch_sizes _lowerCAmelCase =patch_stride _lowerCAmelCase =patch_padding _lowerCAmelCase =embed_dim _lowerCAmelCase =num_heads _lowerCAmelCase =depth _lowerCAmelCase =mlp_ratio _lowerCAmelCase =attention_drop_rate _lowerCAmelCase =drop_rate _lowerCAmelCase =drop_path_rate _lowerCAmelCase =qkv_bias _lowerCAmelCase =cls_token _lowerCAmelCase =qkv_projection_method _lowerCAmelCase =kernel_qkv _lowerCAmelCase =padding_kv _lowerCAmelCase =stride_kv _lowerCAmelCase =padding_q _lowerCAmelCase =stride_q _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps
341
1
"""simple docstring""" import os from typing import Dict, List, Tuple, TypeVar, Union __A = TypeVar('T') __A = Union[List[T], Tuple[T, ...]] __A = Union[T, List[T], Dict[str, T]] __A = Union[str, bytes, os.PathLike]
341
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''image_processor''', '''tokenizer'''] lowerCamelCase = '''CLIPImageProcessor''' lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) _lowerCAmelCase =kwargs.pop("""feature_extractor""" ) _lowerCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _lowerCAmelCase =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer.model_input_names _lowerCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
341
1
"""simple docstring""" import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCAmelCase , """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(__UpperCAmelCase , """num_attention_heads""" ) ) class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=64 , __UpperCAmelCase=3 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=16 , __UpperCAmelCase=[1_28, 2_56, 3_84] , __UpperCAmelCase=[4, 6, 8] , __UpperCAmelCase=[2, 3, 4] , __UpperCAmelCase=[16, 16, 16] , __UpperCAmelCase=0 , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=2 , ) -> List[Any]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =image_size _lowerCAmelCase =num_channels _lowerCAmelCase =kernel_size _lowerCAmelCase =stride _lowerCAmelCase =padding _lowerCAmelCase =hidden_sizes _lowerCAmelCase =num_attention_heads _lowerCAmelCase =depths _lowerCAmelCase =key_dim _lowerCAmelCase =drop_path_rate _lowerCAmelCase =patch_size _lowerCAmelCase =attention_ratio _lowerCAmelCase =mlp_ratio _lowerCAmelCase =initializer_range _lowerCAmelCase =[ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] _lowerCAmelCase =is_training _lowerCAmelCase =use_labels _lowerCAmelCase =num_labels _lowerCAmelCase =initializer_range def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase =self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self ) -> str: return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict: _lowerCAmelCase =LevitModel(config=__UpperCAmelCase ) 
model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =(self.image_size, self.image_size) _lowerCAmelCase , _lowerCAmelCase =image_size[0], image_size[1] for _ in range(4 ): _lowerCAmelCase =floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) _lowerCAmelCase =floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =self.num_labels _lowerCAmelCase =LevitForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) lowerCamelCase = ( { '''feature-extraction''': LevitModel, '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =LevitModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def _lowerCAmelCase ( self ) -> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCAmelCase ( self ) -> List[str]: return @unittest.skip(reason="""Levit does not use inputs_embeds""" ) def _lowerCAmelCase ( self ) -> Tuple: pass @unittest.skip(reason="""Levit does not support input and output embeddings""" ) def _lowerCAmelCase ( self ) -> str: pass @unittest.skip(reason="""Levit does not output attentions""" ) def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase =[*signature.parameters.keys()] _lowerCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase 
=model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) _lowerCAmelCase =outputs.hidden_states _lowerCAmelCase =len(self.model_tester.depths ) + 1 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) _lowerCAmelCase =(self.model_tester.image_size, self.model_tester.image_size) _lowerCAmelCase , _lowerCAmelCase =image_size[0], image_size[1] for _ in range(4 ): _lowerCAmelCase =floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) _lowerCAmelCase =floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase =True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _lowerCAmelCase ( self ) -> str: pass def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Any: if not self.model_tester.is_training: return _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase =True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__UpperCAmelCase ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() _lowerCAmelCase =self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) _lowerCAmelCase =model(**__UpperCAmelCase ).loss loss.backward() def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _lowerCAmelCase =False _lowerCAmelCase =True for model_class in self.all_model_classes: if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue _lowerCAmelCase =model_class(__UpperCAmelCase ) model.gradient_checkpointing_enable() model.to(__UpperCAmelCase ) model.train() _lowerCAmelCase =self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , 
return_labels=__UpperCAmelCase ) _lowerCAmelCase =model(**__UpperCAmelCase ).loss loss.backward() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase =[ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__UpperCAmelCase ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}''' ): _lowerCAmelCase =problem_type["""title"""] _lowerCAmelCase =problem_type["""num_labels"""] _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() _lowerCAmelCase =self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if problem_type["num_labels"] > 1: _lowerCAmelCase =inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) _lowerCAmelCase =inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__UpperCAmelCase ) as warning_list: _lowerCAmelCase =model(**__UpperCAmelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def _lowerCAmelCase ( self ) -> str: for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =LevitModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def _lowerCamelCase() -> Dict: _lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ) -> Optional[Any]: return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __UpperCAmelCase ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(images=__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) # verify the logits _lowerCAmelCase =torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) _lowerCAmelCase =torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['PerceiverFeatureExtractor'] __A = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
"""simple docstring""" import os from datetime import datetime as dt from github import Github __A = [ 'good first issue', 'good second issue', 'good difficult issue', 'enhancement', 'new pipeline/model', 'new scheduler', 'wip', ] def _lowerCamelCase() -> Optional[int]: _lowerCAmelCase =Github(os.environ["""GITHUB_TOKEN"""] ) _lowerCAmelCase =g.get_repo("""huggingface/diffusers""" ) _lowerCAmelCase =repo.get_issues(state="""open""" ) for issue in open_issues: _lowerCAmelCase =sorted(issue.get_comments() , key=lambda __UpperCamelCase : i.created_at , reverse=__UpperCamelCase ) _lowerCAmelCase =comments[0] if len(__UpperCamelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state="""closed""" ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state="""open""" ) issue.remove_from_labels("""stale""" ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) issue.add_to_labels("""stale""" ) if __name__ == "__main__": main()
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=32 * 4 , __UpperCAmelCase=32 * 6 , __UpperCAmelCase=4 , __UpperCAmelCase=32 , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =is_training _lowerCAmelCase =use_auxiliary_loss _lowerCAmelCase =num_queries _lowerCAmelCase =num_channels _lowerCAmelCase =min_size _lowerCAmelCase =max_size _lowerCAmelCase =num_labels _lowerCAmelCase =mask_feature_size def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( __UpperCAmelCase ) _lowerCAmelCase =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__UpperCAmelCase ) _lowerCAmelCase =( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__UpperCAmelCase ) > 0.5 ).float() _lowerCAmelCase =(torch.rand((self.batch_size, self.num_labels) , device=__UpperCAmelCase ) > 0.5).long() _lowerCAmelCase =self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _lowerCAmelCase ( self ) -> int: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase ={"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Any: _lowerCAmelCase =output.encoder_hidden_states _lowerCAmelCase =output.pixel_decoder_hidden_states _lowerCAmelCase =output.transformer_decoder_hidden_states self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(__UpperCAmelCase ) , config.decoder_config.decoder_layers ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ) -> Any: with torch.no_grad(): _lowerCAmelCase =MaskFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() _lowerCAmelCase =model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase ) 
_lowerCAmelCase =model(__UpperCAmelCase , output_hidden_states=__UpperCAmelCase ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: _lowerCAmelCase =MaskFormerForInstanceSegmentation(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() def comm_check_on_output(__UpperCAmelCase ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _lowerCAmelCase =model(pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) _lowerCAmelCase =model( pixel_values=__UpperCAmelCase , pixel_mask=__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ) comm_check_on_output(__UpperCAmelCase ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowerCamelCase = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =MaskFormerModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def _lowerCAmelCase ( self ) -> Dict: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def _lowerCAmelCase ( self ) -> 
Optional[Any]: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def _lowerCAmelCase ( self ) -> Tuple: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def _lowerCAmelCase ( self ) -> List[str]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _lowerCAmelCase ( self ) -> List[Any]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ) _lowerCAmelCase =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase =[*signature.parameters.keys()] _lowerCAmelCase =["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: _lowerCAmelCase =MaskFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =(self.model_tester.min_size,) * 2 _lowerCAmelCase ={ """pixel_values""": torch.randn((2, 3, *size) , device=__UpperCAmelCase ), """mask_labels""": torch.randn((2, 10, *size) , device=__UpperCAmelCase ), """class_labels""": torch.zeros(2 , 10 , device=__UpperCAmelCase ).long(), } _lowerCAmelCase =MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase ) _lowerCAmelCase =model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase , **__UpperCAmelCase , output_hidden_states=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase =model_class(__UpperCAmelCase ).to(__UpperCAmelCase ) _lowerCAmelCase =model(**__UpperCAmelCase , output_attentions=__UpperCAmelCase ) self.assertTrue(outputs.attentions is not None ) def _lowerCAmelCase ( self ) -> Dict: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _lowerCAmelCase =self.all_model_classes[1] _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() _lowerCAmelCase =model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ).loss loss.backward() def _lowerCAmelCase ( self ) -> Optional[Any]: # only MaskFormerForInstanceSegmentation has the loss _lowerCAmelCase =self.all_model_classes[1] _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() _lowerCAmelCase =True _lowerCAmelCase =True _lowerCAmelCase =model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() _lowerCAmelCase =model(__UpperCAmelCase , mask_labels=__UpperCAmelCase , class_labels=__UpperCAmelCase ) _lowerCAmelCase 
=outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCAmelCase =outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _lowerCAmelCase =outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCAmelCase =outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__UpperCAmelCase ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1E-4 def _lowerCamelCase() -> List[Any]: _lowerCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ) -> Optional[Any]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) _lowerCAmelCase =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) _lowerCAmelCase =torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) _lowerCAmelCase =torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) _lowerCAmelCase =torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) _lowerCAmelCase =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) # masks_queries_logits _lowerCAmelCase =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCAmelCase =[ 
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] _lowerCAmelCase =torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) # class_queries_logits _lowerCAmelCase =outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCAmelCase =torch.tensor( [ [1.65_12e00, -5.25_72e00, -3.35_19e00], [3.61_69e-02, -5.90_25e00, -2.93_13e00], [1.07_66e-04, -7.76_30e00, -5.12_63e00], ] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(__UpperCAmelCase ) .eval() ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase ) _lowerCAmelCase =inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(__UpperCAmelCase , (1, 3, 8_00, 10_88) ) with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) # masks_queries_logits _lowerCAmelCase =outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _lowerCAmelCase =[[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] _lowerCAmelCase =torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) # class_queries_logits _lowerCAmelCase =outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _lowerCAmelCase =torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __UpperCAmelCase , atol=__UpperCAmelCase ) ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(__UpperCAmelCase ) .eval() ) _lowerCAmelCase =self.default_image_processor _lowerCAmelCase =image_processor( [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , ) _lowerCAmelCase =inputs["""pixel_values"""].to(__UpperCAmelCase ) _lowerCAmelCase =[el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]] _lowerCAmelCase =[el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCAmelCase =model(**__UpperCAmelCase ) self.assertTrue(outputs.loss is not None )
341
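The assertions in the MaskFormer tests above encode two shape conventions: mask logits are predicted on a 4x-downsampled grid, and class logits carry one extra "null" slot beyond num_labels. A minimal sketch of the same checks on plain tensors (all sizes below are illustrative, not taken from any checkpoint):

import torch

batch_size, num_queries, num_labels = 2, 10, 5
height, width = 128, 192  # assumed input resolution

# mask logits come out at a quarter of the input resolution
masks_queries_logits = torch.randn(batch_size, num_queries, height // 4, width // 4)
# class logits have num_labels + 1 entries; the last one is the "no object" class
class_queries_logits = torch.randn(batch_size, num_queries, num_labels + 1)

assert masks_queries_logits.shape == (batch_size, num_queries, height // 4, width // 4)
assert class_queries_logits.shape == (batch_size, num_queries, num_labels + 1)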
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
341
1
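The fiddliest step in the checkpoint conversion above is splitting a fused qkv projection into per-head query/key/value tensors before renaming. A self-contained sketch of that reshuffle (the channel counts are made up for illustration):

import torch

channels, num_head_channels = 12, 4                   # assumed sizes
num_heads = channels // num_head_channels
qkv_weight = torch.randn(3 * channels, channels, 1)   # fused conv1d-style qkv weight

# group the 3 * channels output rows by head, then split each head's rows into q, k, v
per_head = qkv_weight.reshape((num_heads, 3 * channels // num_heads) + qkv_weight.shape[1:])
query, key, value = per_head.split(channels // num_heads, dim=1)

assert query.shape == (num_heads, channels // num_heads, channels, 1)
assert key.shape == value.shape == query.shape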
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''pixel_values'''] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 2_55 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =size if size is not None else {"""shortest_edge""": 3_84} _lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _lowerCAmelCase =do_resize _lowerCAmelCase =size # Default value set here for backwards compatibility where the value in config is None _lowerCAmelCase =crop_pct if crop_pct is not None else 2_24 / 2_56 _lowerCAmelCase =resample _lowerCAmelCase =do_rescale _lowerCAmelCase =rescale_factor _lowerCAmelCase =do_normalize _lowerCAmelCase =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _lowerCAmelCase =image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: _lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}''' ) _lowerCAmelCase =size["""shortest_edge"""] if shortest_edge < 3_84: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct _lowerCAmelCase =int(shortest_edge / crop_pct ) _lowerCAmelCase =get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _lowerCAmelCase =resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=__UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) else: # warping (no cropping) when evaluated at 384 or larger return resize( __UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> List[str]: return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray: return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image: _lowerCAmelCase =do_resize if do_resize is not None else self.do_resize _lowerCAmelCase =crop_pct if crop_pct is not None else self.crop_pct _lowerCAmelCase =resample if resample is not None else self.resample _lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase =image_mean if image_mean is not None else self.image_mean _lowerCAmelCase =image_std if image_std is not None else self.image_std _lowerCAmelCase =size if size is not None else self.size _lowerCAmelCase =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) _lowerCAmelCase =make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None: raise ValueError("""crop_pct must be specified if size < 384.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
_lowerCAmelCase =[to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: _lowerCAmelCase =[self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , crop_pct=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_rescale: _lowerCAmelCase =[self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: _lowerCAmelCase =[self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] _lowerCAmelCase =[to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] _lowerCAmelCase ={"""pixel_values""": images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
341
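The crop_pct branch above is the usual "resize, then center crop" evaluation recipe: for a requested shortest edge below 384 the image is first resized so its short side equals shortest_edge / crop_pct, and the final size is recovered with a center crop; at 384 and above the image is simply warped to a square. A quick arithmetic sketch using the defaults set in __init__:

shortest_edge = 224
crop_pct = 224 / 256                        # 0.875, the fallback value used above

resize_target = int(shortest_edge / crop_pct)
print(resize_target)                        # 256 -> resize so the short side is 256,
                                            # then center-crop a 224 x 224 window
# for shortest_edge >= 384 no crop happens: the image is resized straight to
# (shortest_edge, shortest_edge)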
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
341
1
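The cross-inversion step above leans on both halves being sorted: once p[i] > q[j], every later element of p also exceeds q[j], so len(p) - i inversions can be added in one shot. A compact, standalone sketch of that merge-and-count idea on a toy input (names here are illustrative):

def merge_count(p, q):
    """Merge two sorted lists and count pairs (x, y) with x from p, y from q, x > y."""
    merged, inversions, i, j = [], 0, 0, 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            inversions += len(p) - i      # p[i:] are all > q[j] because p is sorted
            merged.append(q[j])
            j += 1
        else:
            merged.append(p[i])
            i += 1
    merged.extend(p[i:])
    merged.extend(q[j:])
    return merged, inversions

print(merge_count([2, 5, 10], [1, 5, 11]))    # ([1, 2, 5, 5, 10, 11], 4)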
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor from ..utils import is_datasets_available from .base import PipelineTool if is_datasets_available(): from datasets import load_dataset class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''microsoft/speecht5_tts''' lowerCamelCase = ( '''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the ''' '''text to read (in English) and returns a waveform object containing the sound.''' ) lowerCamelCase = '''text_reader''' lowerCamelCase = SpeechTaProcessor lowerCamelCase = SpeechTaForTextToSpeech lowerCamelCase = SpeechTaHifiGan lowerCamelCase = ['''text'''] lowerCamelCase = ['''audio'''] def _lowerCAmelCase ( self ) -> List[Any]: if self.post_processor is None: _lowerCAmelCase ="""microsoft/speecht5_hifigan""" super().setup() def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ) -> Tuple: _lowerCAmelCase =self.pre_processor(text=__UpperCAmelCase , return_tensors="""pt""" , truncation=__UpperCAmelCase ) if speaker_embeddings is None: if not is_datasets_available(): raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" ) _lowerCAmelCase =load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" ) _lowerCAmelCase =torch.tensor(embeddings_dataset[73_05]["""xvector"""] ).unsqueeze(0 ) return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings} def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: with torch.no_grad(): return self.model.generate_speech(**__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: with torch.no_grad(): return self.post_processor(__UpperCAmelCase ).cpu().detach()
341
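Outside the tool wrapper, the same three checkpoints can be driven directly with the transformers classes it builds on. A minimal usage sketch (the model names are the ones the tool already points at; the input sentence and variable names are illustrative):

import torch
from datasets import load_dataset
from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello there.", return_tensors="pt")
# same speaker embedding the tool falls back to when none is supplied
xvectors = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(xvectors[7305]["xvector"]).unsqueeze(0)

with torch.no_grad():
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
print(speech.shape)   # 1-D waveform tensor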
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True lowerCamelCase = None lowerCamelCase = 1 lowerCamelCase = None lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
341
1
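The copy method above rebuilds the config from a deep copy of its own __dict__, so mutable fields never leak between instances. A tiny sketch of the same pattern on a made-up dataclass:

import copy
from dataclasses import dataclass, field

@dataclass
class Options:
    retries: int = 1
    headers: dict = field(default_factory=dict)

    def clone(self) -> "Options":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

a = Options(headers={"x": "1"})
b = a.clone()
b.headers["x"] = "2"
print(a.headers, b.headers)   # {'x': '1'} {'x': '2'} -- no shared state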
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class lowerCamelCase__ : '''simple docstring''' def __init__( self ) -> Union[str, Any]: _lowerCAmelCase ={} def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> List[str]: if self.graph.get(__UpperCAmelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: _lowerCAmelCase =[[w, v]] if not self.graph.get(__UpperCAmelCase ): _lowerCAmelCase =[] def _lowerCAmelCase ( self ) -> Union[str, Any]: return list(self.graph ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Dict: if s == d: return [] _lowerCAmelCase =[] _lowerCAmelCase =[] if s == -2: _lowerCAmelCase =list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _lowerCAmelCase =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1] else: _lowerCAmelCase =ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return visited def _lowerCAmelCase ( self , __UpperCAmelCase=-1 ) -> Optional[int]: if c == -1: _lowerCAmelCase =floor(random() * 1_00_00 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): _lowerCAmelCase =floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _lowerCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _lowerCAmelCase =deque() _lowerCAmelCase =[] if s == -2: _lowerCAmelCase =list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _lowerCAmelCase =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: _lowerCAmelCase =0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def _lowerCAmelCase ( self , __UpperCAmelCase ) -> int: return len(self.graph[u] ) def _lowerCAmelCase ( self , __UpperCAmelCase=-2 ) -> Dict: _lowerCAmelCase =[] _lowerCAmelCase =[] if s == -2: _lowerCAmelCase =list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _lowerCAmelCase =s _lowerCAmelCase =[] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase =s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase =node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(__UpperCAmelCase ) != 0: _lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1] else: _lowerCAmelCase =ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return sorted_nodes def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =[] _lowerCAmelCase =[] _lowerCAmelCase 
=list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _lowerCAmelCase =-2 _lowerCAmelCase =[] _lowerCAmelCase =s _lowerCAmelCase =False _lowerCAmelCase =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowerCAmelCase =len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase =node[1] break # check if all the children are visited if s == ss: stack.pop() _lowerCAmelCase =True if len(__UpperCAmelCase ) != 0: _lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1] else: _lowerCAmelCase =False indirect_parents.append(__UpperCAmelCase ) _lowerCAmelCase =s _lowerCAmelCase =ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =[] _lowerCAmelCase =[] _lowerCAmelCase =list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _lowerCAmelCase =-2 _lowerCAmelCase =[] _lowerCAmelCase =s _lowerCAmelCase =False _lowerCAmelCase =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowerCAmelCase =len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase =node[1] break # check if all the children are visited if s == ss: stack.pop() _lowerCAmelCase =True if len(__UpperCAmelCase ) != 0: _lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1] else: _lowerCAmelCase =False indirect_parents.append(__UpperCAmelCase ) _lowerCAmelCase =s _lowerCAmelCase =ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _lowerCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[int]: _lowerCAmelCase =time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =time() return end - begin def _lowerCAmelCase ( self , __UpperCAmelCase=-2 ) -> str: _lowerCAmelCase =time() self.bfs(__UpperCAmelCase ) _lowerCAmelCase =time() return end - begin class lowerCamelCase__ : '''simple docstring''' def __init__( self ) -> Any: _lowerCAmelCase ={} def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ) -> Optional[Any]: # check if the u exists if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist _lowerCAmelCase =[[w, v]] # add the other way if self.graph.get(__UpperCAmelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist _lowerCAmelCase =[[w, u]] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]: if self.graph.get(__UpperCAmelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__UpperCAmelCase ) # the other way 
round if self.graph.get(__UpperCAmelCase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> List[Any]: if s == d: return [] _lowerCAmelCase =[] _lowerCAmelCase =[] if s == -2: _lowerCAmelCase =list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _lowerCAmelCase =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__UpperCAmelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__UpperCAmelCase ) != 0: _lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1] else: _lowerCAmelCase =ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return visited def _lowerCAmelCase ( self , __UpperCAmelCase=-1 ) -> str: if c == -1: _lowerCAmelCase =floor(random() * 1_00_00 ) + 10 for i in range(__UpperCAmelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): _lowerCAmelCase =floor(random() * c ) + 1 if n != i: self.add_pair(__UpperCAmelCase , __UpperCAmelCase , 1 ) def _lowerCAmelCase ( self , __UpperCAmelCase=-2 ) -> List[Any]: _lowerCAmelCase =deque() _lowerCAmelCase =[] if s == -2: _lowerCAmelCase =list(self.graph )[0] d.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) while d: _lowerCAmelCase =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: return len(self.graph[u] ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =[] _lowerCAmelCase =[] _lowerCAmelCase =list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _lowerCAmelCase =-2 _lowerCAmelCase =[] _lowerCAmelCase =s _lowerCAmelCase =False _lowerCAmelCase =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowerCAmelCase =len(__UpperCAmelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase =node[1] break # check if all the children are visited if s == ss: stack.pop() _lowerCAmelCase =True if len(__UpperCAmelCase ) != 0: _lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1] else: _lowerCAmelCase =False indirect_parents.append(__UpperCAmelCase ) _lowerCAmelCase =s _lowerCAmelCase =ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return list(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =[] _lowerCAmelCase =[] _lowerCAmelCase =list(self.graph )[0] stack.append(__UpperCAmelCase ) visited.append(__UpperCAmelCase ) _lowerCAmelCase =-2 _lowerCAmelCase =[] _lowerCAmelCase =s _lowerCAmelCase =False _lowerCAmelCase =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: _lowerCAmelCase =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and 
indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): _lowerCAmelCase =len(__UpperCAmelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) _lowerCAmelCase =node[1] break # check if all the children are visited if s == ss: stack.pop() _lowerCAmelCase =True if len(__UpperCAmelCase ) != 0: _lowerCAmelCase =stack[len(__UpperCAmelCase ) - 1] else: _lowerCAmelCase =False indirect_parents.append(__UpperCAmelCase ) _lowerCAmelCase =s _lowerCAmelCase =ss # check if se have reached the starting point if len(__UpperCAmelCase ) == 0: return False def _lowerCAmelCase ( self ) -> List[Any]: return list(self.graph ) def _lowerCAmelCase ( self , __UpperCAmelCase=-2 , __UpperCAmelCase=-1 ) -> Optional[Any]: _lowerCAmelCase =time() self.dfs(__UpperCAmelCase , __UpperCAmelCase ) _lowerCAmelCase =time() return end - begin def _lowerCAmelCase ( self , __UpperCAmelCase=-2 ) -> Tuple: _lowerCAmelCase =time() self.bfs(__UpperCAmelCase ) _lowerCAmelCase =time() return end - begin
341
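The cycle checks in the graph classes above track an explicit stack plus a list of "indirect parents" by hand; the same back-edge question is often easier to follow as a recursive colouring DFS. A small standalone sketch (it uses plain adjacency lists rather than the [weight, vertex] pairs stored above):

def has_cycle(graph):
    """graph: dict mapping node -> list of neighbour nodes (directed)."""
    WHITE, GREY, BLACK = 0, 1, 2           # unvisited / on current path / finished
    colour = {node: WHITE for node in graph}

    def visit(node):
        colour[node] = GREY
        for neighbour in graph.get(node, []):
            if colour.get(neighbour, WHITE) == GREY:          # back edge -> cycle
                return True
            if colour.get(neighbour, WHITE) == WHITE and visit(neighbour):
                return True
        colour[node] = BLACK
        return False

    return any(colour[node] == WHITE and visit(node) for node in graph)

print(has_cycle({1: [2], 2: [3], 3: [1]}))   # True
print(has_cycle({1: [2], 2: [3], 3: []}))    # False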
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowerCamelCase() -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
341
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __A = logging.get_logger(__name__) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> None: warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
341
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
341
1
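The fields returned by the metric above map directly onto the textbook definition: BLEU is the geometric mean of the n-gram precisions multiplied by a brevity penalty. A small numeric sketch of that final combination (the precision values and lengths are invented):

import math

precisions = [0.8, 0.6, 0.5, 0.4]            # assumed 1- to 4-gram precisions
translation_length, reference_length = 9, 10

geo_mean = math.exp(sum(math.log(p) for p in precisions) / len(precisions))
ratio = translation_length / reference_length
brevity_penalty = 1.0 if ratio > 1.0 else math.exp(1 - 1 / ratio)
bleu = geo_mean * brevity_penalty
print(round(bleu, 4))                        # well below 1.0: a short, imprecise candidate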
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate __A = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('', '|', '|'), datarow=DataRow('', '|', '|'), padding=1, with_header_hide=None, ) __A = [] __A = [] __A = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}} __A = [ { 'type': 'header', 'text': { 'type': 'plain_text', 'text': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""", 'emoji': True, }, } ] __A = 0 for log in Path().glob('*.log'): __A = 0 with open(log, 'r') as f: for line in f: __A = json.loads(line) if line.get('nodeid', '') != "": __A = line['nodeid'] if line.get('duration', None) is not None: __A = F"""{line["duration"]:.4f}""" if line.get('outcome', '') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('_')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) __A = [] log.unlink() __A = '' __A = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" __A = [] __A = {} for test in failed_tests: __A = test[0].split('::') __A = data[0].split('/')[-1] if data[0] not in filesafailed: __A = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) __A = [test[0] for test in failed_table] __A = list(set(files)) # Count number of instances in failed_tests __A = [] for file in individual_files: table.append([file, len(filesafailed[file])]) __A = tabulate( table, headers=['Test Location', 'Num Failed'], tablefmt=hf_table_format, stralign='right', ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: __A = 'Too many failed tests, please see the full report in the Action results.' __A = len(err) + 10 __A = message[: 3000 - offset] + F"""\n...\n```\n{err}""" print(F"""### {message}""") else: __A = 'No failed tests! 🤗' print(F"""## {message}""") payload.append(no_error_payload) if os.environ.get('TEST_TYPE', '') != "": from slack_sdk import WebClient __A = WebClient(token=os.environ['SLACK_API_TOKEN']) if message != "No failed tests! 
🤗": __A = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': message, }, } payload.append(md_report) __A = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': '*For more details:*', }, 'accessory': { 'type': 'button', 'text': { 'type': 'plain_text', 'text': 'Check Action results', 'emoji': True, }, 'url': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""", }, } payload.append(action_button) __A = { 'type': 'context', 'elements': [ { 'type': 'plain_text', 'text': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""", } ], } payload.append(date_report) __A = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload) __A = response.data['ts'] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name __A = '' for i, row in enumerate(test_failures): if row[0] != test_class: __A = row[0] else: __A = '' __A = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""", }, } client.chat_postMessage( channel='#accelerate-ci-daily', thread_ts=ts, blocks=[payload], )
341
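The custom TableFormat in the report script just renders pipe-separated header and data rows with no rule lines, which keeps the Slack code block compact. A quick sketch of the same format on dummy data (the file names and counts are made up):

from tabulate import DataRow, TableFormat, tabulate

slim_format = TableFormat(
    lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None,
    headerrow=DataRow("", "|", "|"), datarow=DataRow("", "|", "|"),
    padding=1, with_header_hide=None,
)
rows = [["tests/test_foo.py", 2], ["tests/test_bar.py", 1]]
print(tabulate(rows, headers=["Test Location", "Num Failed"], tablefmt=slim_format, stralign="right"))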
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
341
1
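A minimal, runnable sketch of the string-to-bool argparse helper pattern used by the conversion script above; the `--use_linear_projection` flag mirrors that script, while the parser setup and the demo invocation are illustrative assumptions.

import argparse

def parse_bool(string: str) -> bool:
    # Accept only the literal strings "True"/"False", as the script above does.
    if string == "True":
        return True
    if string == "False":
        return False
    raise ValueError(f"could not parse string as bool {string}")

parser = argparse.ArgumentParser()
parser.add_argument("--use_linear_projection", required=False, type=parse_bool)
args = parser.parse_args(["--use_linear_projection", "True"])
print(args.use_linear_projection)  # True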
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
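A readable sketch of the divide-and-conquer inversion counting shown in the code above; the names count_inversions and merge_count are illustrative assumptions, not identifiers taken from that code.

from __future__ import annotations

def merge_count(p: list[int], q: list[int]) -> tuple[list[int], int]:
    # Merge two already-sorted lists while counting cross inversions.
    merged, inversions, i, j = [], 0, 0, 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # Every remaining element of p is greater than q[j].
            inversions += len(p) - i
            merged.append(q[j])
            j += 1
        else:
            merged.append(p[i])
            i += 1
    merged.extend(p[i:] or q[j:])
    return merged, inversions

def count_inversions(arr: list[int]) -> tuple[list[int], int]:
    if len(arr) <= 1:
        return list(arr), 0
    mid = len(arr) // 2
    left, left_inv = count_inversions(arr[:mid])
    right, right_inv = count_inversions(arr[mid:])
    merged, cross_inv = merge_count(left, right)
    return merged, left_inv + right_inv + cross_inv

# The example array used above has exactly 8 inversions.
assert count_inversions([10, 2, 1, 5, 5, 2, 11])[1] == 8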
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel __A = { 'text_branch': 'text_model', 'audio_branch': 'audio_model.audio_encoder', 'attn': 'attention.self', 'self.proj': 'output.dense', 'attention.self_mask': 'attn_mask', 'mlp.fc1': 'intermediate.dense', 'mlp.fc2': 'output.dense', 'norm1': 'layernorm_before', 'norm2': 'layernorm_after', 'bn0': 'batch_norm', } __A = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=False ) -> List[Any]: _lowerCAmelCase , _lowerCAmelCase =create_model( """HTSAT-tiny""" , """roberta""" , __UpperCamelCase , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__UpperCamelCase , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]: _lowerCAmelCase ={} _lowerCAmelCase =R""".*sequential.(\d+).*""" _lowerCAmelCase =R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _lowerCAmelCase =key.replace(__UpperCamelCase , __UpperCamelCase ) if re.match(__UpperCamelCase , __UpperCamelCase ): # replace sequential layers with list _lowerCAmelCase =re.match(__UpperCamelCase , __UpperCamelCase ).group(1 ) _lowerCAmelCase =key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(__UpperCamelCase )//3}.linear.''' ) elif re.match(__UpperCamelCase , __UpperCamelCase ): _lowerCAmelCase =int(re.match(__UpperCamelCase , __UpperCamelCase ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_lowerCAmelCase =1 if projecton_layer == 0 else 2 _lowerCAmelCase =key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' ) if "audio" and "qkv" in key: # split qkv into query key and value _lowerCAmelCase =value _lowerCAmelCase =mixed_qkv.size(0 ) // 3 _lowerCAmelCase =mixed_qkv[:qkv_dim] _lowerCAmelCase =mixed_qkv[qkv_dim : qkv_dim * 2] _lowerCAmelCase =mixed_qkv[qkv_dim * 2 :] _lowerCAmelCase =query_layer _lowerCAmelCase =key_layer _lowerCAmelCase =value_layer else: _lowerCAmelCase =value return model_state_dict def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Any: _lowerCAmelCase , _lowerCAmelCase =init_clap(__UpperCamelCase , enable_fusion=__UpperCamelCase ) clap_model.eval() _lowerCAmelCase =clap_model.state_dict() _lowerCAmelCase =rename_state_dict(__UpperCamelCase ) _lowerCAmelCase =ClapConfig() _lowerCAmelCase =enable_fusion _lowerCAmelCase =ClapModel(__UpperCamelCase ) # ignore the spectrogram embedding layer model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) transformers_config.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') __A = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
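A minimal sketch of the fused-qkv splitting step performed in the CLAP conversion code above, using only plain tensor slicing; the tensor here is random stand-in data, not an actual CLAP weight.

import torch

mixed_qkv = torch.randn(3 * 8, 8)            # stand-in for a fused qkv projection weight
qkv_dim = mixed_qkv.size(0) // 3             # size of each of the query/key/value blocks
query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]
assert query_layer.shape == key_layer.shape == value_layer.shape == (8, 8)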
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =1 _lowerCAmelCase =3 _lowerCAmelCase =(32, 32) _lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCAmelCase ( self ) -> str: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) return model @property def _lowerCAmelCase ( self ) -> str: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self ) -> List[Any]: torch.manual_seed(0 ) _lowerCAmelCase =RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , ) return RobertaSeriesModelWithTransformation(__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> Optional[Any]: def extract(*__UpperCAmelCase , **__UpperCAmelCase ): class lowerCamelCase__ : '''simple docstring''' def __init__( self ) -> Optional[Any]: _lowerCAmelCase =torch.ones([0] ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: self.pixel_values.to(__UpperCAmelCase ) return self return Out() return extract def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet _lowerCAmelCase =PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) _lowerCAmelCase =77 _lowerCAmelCase =self.dummy_image.to(__UpperCAmelCase ) _lowerCAmelCase =init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk _lowerCAmelCase =AltDiffusionImgaImgPipeline( unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=self.dummy_extractor , ) _lowerCAmelCase =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCAmelCase ) _lowerCAmelCase =alt_pipe.to(__UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) 
_lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =alt_pipe( [prompt] , generator=__UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__UpperCAmelCase , ) _lowerCAmelCase =output.images _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =alt_pipe( [prompt] , generator=__UpperCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _lowerCAmelCase =np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.dummy_cond_unet _lowerCAmelCase =PNDMScheduler(skip_prk_steps=__UpperCAmelCase ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) _lowerCAmelCase =77 _lowerCAmelCase =self.dummy_image.to(__UpperCAmelCase ) # put models in fp16 _lowerCAmelCase =unet.half() _lowerCAmelCase =vae.half() _lowerCAmelCase =bert.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase =AltDiffusionImgaImgPipeline( unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=self.dummy_extractor , ) _lowerCAmelCase =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__UpperCAmelCase ) _lowerCAmelCase =alt_pipe.to(__UpperCAmelCase ) alt_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =alt_pipe( [prompt] , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , image=__UpperCAmelCase , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) # resize to resolution that is divisible by 8 but not 16 or 32 _lowerCAmelCase =init_image.resize((7_60, 5_04) ) _lowerCAmelCase ="""BAAI/AltDiffusion""" _lowerCAmelCase =AltDiffusionImgaImgPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""A fantasy landscape, trending on artstation""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] _lowerCAmelCase =image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 7_60, 3) _lowerCAmelCase =np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 
0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) _lowerCAmelCase =init_image.resize((7_68, 5_12) ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" ) _lowerCAmelCase ="""BAAI/AltDiffusion""" _lowerCAmelCase =AltDiffusionImgaImgPipeline.from_pretrained( __UpperCAmelCase , safety_checker=__UpperCAmelCase , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""A fantasy landscape, trending on artstation""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , strength=0.7_5 , guidance_scale=7.5 , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 7_68, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image ).max() < 1e-2
341
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
341
1
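A small sketch of the "expected slice" assertion pattern the pipeline tests above rely on: compare a tiny corner of the generated image against hard-coded reference values within a tolerance. The zero arrays here are placeholders, not real pipeline outputs.

import numpy as np

image = np.zeros((1, 32, 32, 3))          # placeholder for pipeline output of shape (batch, h, w, c)
image_slice = image[0, -3:, -3:, -1]      # bottom-right 3x3 patch of the last channel
expected_slice = np.zeros(9)              # the tests hard-code nine reference floats here
assert image.shape == (1, 32, 32, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3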
"""simple docstring""" from jiwer import compute_measures import datasets __A = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' __A = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n' __A = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> Union[str, Any]: if concatenate_texts: return compute_measures(__UpperCAmelCase , __UpperCAmelCase )["wer"] else: _lowerCAmelCase =0 _lowerCAmelCase =0 for prediction, reference in zip(__UpperCAmelCase , __UpperCAmelCase ): _lowerCAmelCase =compute_measures(__UpperCAmelCase , __UpperCAmelCase ) incorrect += measures["substitutions"] + 
measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
341
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
341
1
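A short sketch of the iterative (non-concatenated) WER computation in the metric above, assuming a jiwer version that still exposes compute_measures and its (truth, hypothesis) argument order; the example sentences are the ones from that metric's docstring, which reports a WER of 0.5.

from jiwer import compute_measures

predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

incorrect, total = 0, 0
for prediction, reference in zip(predictions, references):
    measures = compute_measures(reference, prediction)
    incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
    total += measures["substitutions"] + measures["deletions"] + measures["hits"]

print(incorrect / total)  # 0.5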
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
341
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __A = logging.get_logger(__name__) __A = {'vocab_file': 'spiece.model'} __A = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', } } __A = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } # Segments (not really needed) __A = 0 __A = 1 __A = 2 __A = 3 __A = 4 class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = '''left''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =3 _lowerCAmelCase =do_lower_case _lowerCAmelCase =remove_space _lowerCAmelCase =keep_accents _lowerCAmelCase =vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> str: return len(self.sp_model ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Optional[int]: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None return state def __setstate__( self , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]: if self.remove_space: _lowerCAmelCase =""" """.join(inputs.strip().split() ) else: _lowerCAmelCase =inputs _lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase ) _lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] ) if self.do_lower_case: _lowerCAmelCase =outputs.lower() return outputs def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: _lowerCAmelCase 
=self.preprocess_text(__UpperCAmelCase ) _lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) _lowerCAmelCase =[] for piece in pieces: if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _lowerCAmelCase =cur_pieces[1:] else: _lowerCAmelCase =cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(__UpperCAmelCase ) else: new_pieces.append(__UpperCAmelCase ) return new_pieces def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: return self.sp_model.PieceToId(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.IdToPiece(__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str: _lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase ) _lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _lowerCAmelCase =[] _lowerCAmelCase =[] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) _lowerCAmelCase =[] sub_texts.append(__UpperCAmelCase ) else: current_sub_text.append(__UpperCAmelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _lowerCAmelCase ="""""".join(__UpperCAmelCase ) _lowerCAmelCase =( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase ) return clean_text else: return text def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] return ([0] * len(__UpperCAmelCase )) + [1, 1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = 
None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,)
341
1
"""simple docstring""" import random def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =num - 1 _lowerCAmelCase =0 while s % 2 == 0: _lowerCAmelCase =s // 2 t += 1 for _ in range(5 ): _lowerCAmelCase =random.randrange(2 , num - 1 ) _lowerCAmelCase =pow(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if v != 1: _lowerCAmelCase =0 while v != (num - 1): if i == t - 1: return False else: _lowerCAmelCase =i + 1 _lowerCAmelCase =(v**2) % num return True def _lowerCamelCase(__UpperCamelCase ) -> bool: if num < 2: return False _lowerCAmelCase =[ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(__UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase = 1024 ) -> int: while True: _lowerCAmelCase =random.randrange(2 ** (keysize - 1) , 2 ** (keysize) ) if is_prime_low_num(__UpperCamelCase ): return num if __name__ == "__main__": __A = generate_large_prime() print(('Prime number:', num)) print(('is_prime_low_num:', is_prime_low_num(num)))
341
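A small, purely illustrative driver for the primality helpers in the row above (it assumes the cleaned-up names rabin_miller, is_prime_low_num and generate_large_prime are in scope in the same module; the naive trial-division oracle is added here only for cross-checking and is not part of the original snippet):

import random


def naive_is_prime(n: int) -> bool:
    # plain trial division, used only as a reference oracle for the check below
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))


random.seed(0)
# the low-prime pre-filter plus Rabin-Miller should agree with trial division on small inputs
assert all(is_prime_low_num(n) == naive_is_prime(n) for n in range(2, 2000))
# a freshly generated key-sized candidate has the expected bit length
p = generate_large_prime(keysize=128)
assert p.bit_length() == 128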
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase ) -> bool: _lowerCAmelCase =str(__UpperCamelCase ) return n == n[::-1] def _lowerCamelCase(__UpperCamelCase = 1000000 ) -> str: _lowerCAmelCase =0 for i in range(1 , __UpperCamelCase ): if is_palindrome(__UpperCamelCase ) and is_palindrome(bin(__UpperCamelCase ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
341
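To make the double-base palindrome criterion from the row above concrete, here is a brief hand check (an illustrative addition, not part of the original row): 585 is palindromic in base 10 and its binary form 1001001001 is also palindromic, so it is counted, while 10 (binary 1010) is not.

def is_pal(s: str) -> bool:
    # helper local to this check; mirrors the is_palindrome logic above
    return s == s[::-1]


assert is_pal(str(585)) and is_pal(bin(585)[2:])      # 585 -> '1001001001', counted
assert not (is_pal(str(10)) and is_pal(bin(10)[2:]))  # 10  -> '1010', not counted
# sum of all such numbers below 1000: 1+3+5+7+9+33+99+313+585+717
assert sum(i for i in range(1, 1000) if is_pal(str(i)) and is_pal(bin(i)[2:])) == 1772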
1
"""simple docstring""" import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline __A = { 'n_samples': 64, 'horizon': 32, 'num_inference_steps': 20, 'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network 'scale_grad_by_std': True, 'scale': 0.1, 'eta': 0.0, 't_grad_cutoff': 2, 'device': 'cpu', } if __name__ == "__main__": __A = 'hopper-medium-v2' __A = gym.make(env_name) __A = ValueGuidedRLPipeline.from_pretrained( 'bglick13/hopper-medium-v2-value-function-hor32', env=env, ) env.seed(0) __A = env.reset() __A = 0 __A = 0 __A = 1000 __A = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy __A = pipeline(obs, planning_horizon=32) # execute action in environment __A , __A , __A , __A = env.step(denorm_actions) __A = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:""" F""" {total_score}""" ) # save observations for rendering rollout.append(next_observation.copy()) __A = next_observation except KeyboardInterrupt: pass print(F"""Total reward: {total_reward}""")
341
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = {} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''llama''' lowerCamelCase = ['''past_key_values'''] def __init__( self , __UpperCAmelCase=3_20_00 , __UpperCAmelCase=40_96 , __UpperCAmelCase=1_10_08 , __UpperCAmelCase=32 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase="silu" , __UpperCAmelCase=20_48 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=False , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =hidden_size _lowerCAmelCase =intermediate_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads # for backward compatibility if num_key_value_heads is None: _lowerCAmelCase =num_attention_heads _lowerCAmelCase =num_key_value_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =initializer_range _lowerCAmelCase =rms_norm_eps _lowerCAmelCase =pretraining_tp _lowerCAmelCase =use_cache _lowerCAmelCase =rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , tie_word_embeddings=__UpperCAmelCase , **__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> str: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __UpperCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'''got {self.rope_scaling}''' ) _lowerCAmelCase =self.rope_scaling.get("""type""" , __UpperCAmelCase ) _lowerCAmelCase =self.rope_scaling.get("""factor""" , __UpperCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
341
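The LlamaConfig row above validates its rope_scaling argument via _rope_scaling_validation (the type must be 'linear' or 'dynamic', and the factor a float greater than 1). A minimal sketch of how that validation behaves through the public transformers API; the exact error wording may differ between library versions:

from transformers import LlamaConfig

# a well-formed rope_scaling dict passes _rope_scaling_validation
cfg = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
print(cfg.rope_scaling)

# an unknown scaling type (or a factor <= 1.0) is rejected with a ValueError
try:
    LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print("rejected:", err)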
1
"""simple docstring""" from __future__ import annotations def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> list: _lowerCAmelCase =[] _lowerCAmelCase , _lowerCAmelCase =input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) _lowerCAmelCase =result + left + right return input_list def _lowerCamelCase(__UpperCamelCase ) -> list: if len(__UpperCamelCase ) <= 1: return input_list _lowerCAmelCase =list(__UpperCamelCase ) # iteration for two-way merging _lowerCAmelCase =2 while p <= len(__UpperCamelCase ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(__UpperCamelCase ) , __UpperCamelCase ): _lowerCAmelCase =i _lowerCAmelCase =i + p - 1 _lowerCAmelCase =(low + high + 1) // 2 _lowerCAmelCase =merge(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # final merge of last two parts if p * 2 >= len(__UpperCamelCase ): _lowerCAmelCase =i _lowerCAmelCase =merge(__UpperCamelCase , 0 , __UpperCamelCase , len(__UpperCamelCase ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": __A = input('Enter numbers separated by a comma:\n').strip() if user_input == "": __A = [] else: __A = [int(item.strip()) for item in user_input.split(',')] print(iter_merge_sort(unsorted))
341
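A short sanity-check driver for the iterative merge sort in the row above (assuming the cleaned-up merge / iter_merge_sort names are in scope; the comparison against Python's built-in sorted is an illustrative addition, not part of the original row):

import random

random.seed(0)
for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 30))]
    # the iterative bottom-up sort should agree with the built-in sort
    assert iter_merge_sort(data) == sorted(data)
print("iter_merge_sort matches sorted() on 100 random lists")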
"""simple docstring""" import warnings from .generation import TFGenerationMixin class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' # warning at import time warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , __magic_name__ , )
341
1
"""simple docstring""" import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = FunnelTokenizer lowerCamelCase = FunnelTokenizerFast lowerCamelCase = True lowerCamelCase = True def _lowerCAmelCase ( self ) -> Tuple: super().setUp() _lowerCAmelCase =[ """<unk>""", """<cls>""", """<sep>""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> List[str]: return FunnelTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Tuple: return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]: _lowerCAmelCase ="""UNwant\u00E9d,running""" _lowerCAmelCase ="""unwanted, running""" return input_text, output_text def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer_class(self.vocab_file ) _lowerCAmelCase =tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(__UpperCAmelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.get_tokenizers(do_lower_case=__UpperCAmelCase ) for tokenizer in tokenizers: _lowerCAmelCase =tokenizer("""UNwant\u00E9d,running""" ) _lowerCAmelCase =len(inputs["""input_ids"""] ) - 1 self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len ) _lowerCAmelCase =tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" ) self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
341
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =decoder_seq_length # For common tests _lowerCAmelCase =self.decoder_seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_attention_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =d_model _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =eos_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =decoder_start_token_id _lowerCAmelCase =use_cache _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =None _lowerCAmelCase =decoder_seq_length _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_attention_mask: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: _lowerCAmelCase =True _lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _lowerCAmelCase =outputs["""past_key_values"""] # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # 
append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase = True lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Any: pass def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def _lowerCAmelCase ( self ) -> str: pass
341
1
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path __A = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def _lowerCamelCase(__UpperCamelCase=True ) -> Optional[Any]: if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__magic_name__ ) ) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: with TemporaryDirectory() as tmp_dir: _lowerCAmelCase =dataset_module_factory(__UpperCAmelCase , cache_dir=__UpperCAmelCase ) _lowerCAmelCase =import_main_class(dataset_module.module_path , dataset=__UpperCAmelCase ) _lowerCAmelCase =builder_cls( cache_dir=__UpperCAmelCase , config_name=__UpperCAmelCase , hash=dataset_module.hash , ) _lowerCAmelCase ="""/""".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=__UpperCAmelCase ).replace(os.sep , """/""" ), config.DATASET_INFO_FILENAME, ] ) _lowerCAmelCase =cached_path(__UpperCAmelCase , cache_dir=__UpperCAmelCase ) self.assertTrue(os.path.exists(__UpperCAmelCase ) ) @pytest.mark.integration def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple""" _lowerCAmelCase =dataset_module_factory("""wikipedia""" , cache_dir=__UpperCamelCase ) _lowerCAmelCase =import_main_class(dataset_module.module_path ) _lowerCAmelCase =builder_cls( cache_dir=__UpperCamelCase , config_name="""20220301.frr""" , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam _lowerCAmelCase =None builder_instance.download_and_prepare() _lowerCAmelCase =builder_instance.as_dataset() assert ds @pytest.mark.integration def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]: _lowerCAmelCase =dataset_module_factory("""wikipedia""" , cache_dir=__UpperCamelCase ) _lowerCAmelCase =import_main_class(dataset_module.module_path , dataset=__UpperCamelCase ) _lowerCAmelCase =builder_cls( cache_dir=__UpperCamelCase , 
config_name="""20220301.frr""" , hash=dataset_module.hash , ) _lowerCAmelCase =builder_instance.as_streaming_dataset() assert ds assert isinstance(__UpperCamelCase , __UpperCamelCase ) assert "train" in ds assert isinstance(ds["""train"""] , __UpperCamelCase ) assert next(iter(ds["""train"""] ) )
341
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = JukeboxTokenizer lowerCamelCase = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def _lowerCAmelCase ( self ) -> str: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 
45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), torch.tensor([[0, 0, 0, 10_69, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def _lowerCAmelCase ( self ) -> Any: import torch _lowerCAmelCase =JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) _lowerCAmelCase =tokenizer(**self.metas )["""input_ids"""] # fmt: off _lowerCAmelCase =[ torch.tensor([[ 0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 
35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
341
1
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''image_processor''', '''tokenizer'''] lowerCamelCase = '''CLIPImageProcessor''' lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) _lowerCAmelCase =kwargs.pop("""feature_extractor""" ) _lowerCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _lowerCAmelCase =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer.model_input_names _lowerCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
341
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = '▁' __A = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'} __A = { 'vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model', }, 'monolingual_vocab_file': { 'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt', }, } __A = {'vinai/bartpho-syllable': 1024} class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = VOCAB_FILES_NAMES lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token _lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) _lowerCAmelCase =vocab_file _lowerCAmelCase =monolingual_vocab_file _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility _lowerCAmelCase ={} _lowerCAmelCase =0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =cnt cnt += 1 with open(__UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: for line in f.readlines(): _lowerCAmelCase =line.strip().split()[0] _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: _lowerCAmelCase =len(self.fairseq_tokens_to_ids ) _lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: _lowerCAmelCase =self.__dict__.copy() _lowerCAmelCase =None _lowerCAmelCase =self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ) -> List[Any]: _lowerCAmelCase =d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _lowerCAmelCase ={} _lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] _lowerCAmelCase =[self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: 
if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: _lowerCAmelCase =[self.sep_token_id] _lowerCAmelCase =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCAmelCase ( self ) -> Union[str, Any]: return len(self.fairseq_ids_to_tokens ) def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]: return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: return self.fairseq_ids_to_tokens[index] def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip() return out_string def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]: if not os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) _lowerCAmelCase =os.path.join( __UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , """wb""" ) as fi: _lowerCAmelCase =self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f'''{str(__UpperCAmelCase )} \n''' ) return out_vocab_file, out_monolingual_vocab_file
341
1
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = (UnCLIPScheduler,) def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase ={ """num_train_timesteps""": 10_00, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**__UpperCAmelCase ) return config def _lowerCAmelCase ( self ) -> Tuple: for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Any: for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Any: for clip_sample in [True, False]: self.check_over_configs(clip_sample=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Union[str, Any]: for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Dict: for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: for time_step in [0, 5_00, 9_99]: for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=__UpperCAmelCase , prev_timestep=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.scheduler_classes[0] _lowerCAmelCase =self.get_scheduler_config(variance_type="""fixed_small_log""" ) _lowerCAmelCase =scheduler_class(**__UpperCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_5_4_9_6_2_5 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9_9_9_4_9_8_7 ) ) < 1e-5 def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.scheduler_classes[0] _lowerCAmelCase =self.get_scheduler_config(variance_type="""learned_range""" ) _lowerCAmelCase =scheduler_class(**__UpperCAmelCase ) _lowerCAmelCase =0.5 assert scheduler._get_variance(1 , predicted_variance=__UpperCAmelCase ) - -1_0.1_7_1_2_7_9_0 < 1e-5 assert scheduler._get_variance(4_87 , predicted_variance=__UpperCAmelCase ) - -5.7_9_9_8_0_5_2 < 1e-5 assert scheduler._get_variance(9_99 , predicted_variance=__UpperCAmelCase ) - -0.0_0_1_0_0_1_1 < 1e-5 def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.scheduler_classes[0] _lowerCAmelCase =self.get_scheduler_config() _lowerCAmelCase =scheduler_class(**__UpperCAmelCase ) _lowerCAmelCase =scheduler.timesteps _lowerCAmelCase =self.dummy_model() _lowerCAmelCase =self.dummy_sample_deter _lowerCAmelCase =torch.manual_seed(0 ) for i, t in enumerate(__UpperCAmelCase ): # 1. predict noise residual _lowerCAmelCase =model(__UpperCAmelCase , __UpperCAmelCase ) # 2. 
predict previous mean of sample x_t-1 _lowerCAmelCase =scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample _lowerCAmelCase =pred_prev_sample _lowerCAmelCase =torch.sum(torch.abs(__UpperCAmelCase ) ) _lowerCAmelCase =torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1e-2 assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1e-3 def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.scheduler_classes[0] _lowerCAmelCase =self.get_scheduler_config() _lowerCAmelCase =scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(25 ) _lowerCAmelCase =scheduler.timesteps _lowerCAmelCase =self.dummy_model() _lowerCAmelCase =self.dummy_sample_deter _lowerCAmelCase =torch.manual_seed(0 ) for i, t in enumerate(__UpperCAmelCase ): # 1. predict noise residual _lowerCAmelCase =model(__UpperCAmelCase , __UpperCAmelCase ) if i + 1 == timesteps.shape[0]: _lowerCAmelCase =None else: _lowerCAmelCase =timesteps[i + 1] # 2. predict previous mean of sample x_t-1 _lowerCAmelCase =scheduler.step( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , prev_timestep=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample _lowerCAmelCase =pred_prev_sample _lowerCAmelCase =torch.sum(torch.abs(__UpperCAmelCase ) ) _lowerCAmelCase =torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1e-2 assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1e-3 def _lowerCAmelCase ( self ) -> Dict: pass def _lowerCAmelCase ( self ) -> List[str]: pass
341
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =1 _lowerCAmelCase =3 _lowerCAmelCase =(32, 32) _lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) return CLIPTextModel(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , 
guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, -1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _lowerCAmelCase =unet.half() _lowerCAmelCase =text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase 
=sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowerCAmelCase ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , ) _lowerCAmelCase 
=torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
341
1
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __A = logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' def __init__( self , **__UpperCAmelCase ) -> Optional[int]: super().__init__(**__UpperCAmelCase ) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) requires_backends(self , """vision""" ) self.check_model_type(__UpperCAmelCase ) def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> str: if "text_queries" in kwargs: _lowerCAmelCase =kwargs.pop("""text_queries""" ) if isinstance(__UpperCAmelCase , (str, Image.Image) ): _lowerCAmelCase ={"""image""": image, """candidate_labels""": candidate_labels} else: _lowerCAmelCase =image _lowerCAmelCase =super().__call__(__UpperCAmelCase , **__UpperCAmelCase ) return results def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase ={} if "threshold" in kwargs: _lowerCAmelCase =kwargs["""threshold"""] if "top_k" in kwargs: _lowerCAmelCase =kwargs["""top_k"""] return {}, {}, postprocess_params def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Tuple: _lowerCAmelCase =load_image(inputs["""image"""] ) _lowerCAmelCase =inputs["""candidate_labels"""] if isinstance(__UpperCAmelCase , __UpperCAmelCase ): _lowerCAmelCase =candidate_labels.split(""",""" ) _lowerCAmelCase =torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(__UpperCAmelCase ): _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=self.framework ) _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=self.framework ) yield { "is_last": i == len(__UpperCAmelCase ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[int]: _lowerCAmelCase =model_inputs.pop("""target_size""" ) _lowerCAmelCase =model_inputs.pop("""candidate_label""" ) _lowerCAmelCase =model_inputs.pop("""is_last""" ) _lowerCAmelCase =self.model(**__UpperCAmelCase ) _lowerCAmelCase ={"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0.1 , __UpperCAmelCase=None ) -> Tuple: _lowerCAmelCase =[] for model_output in model_outputs: _lowerCAmelCase =model_output["""candidate_label"""] _lowerCAmelCase =BaseModelOutput(__UpperCAmelCase ) _lowerCAmelCase =self.image_processor.post_process_object_detection( outputs=__UpperCAmelCase , threshold=__UpperCAmelCase , target_sizes=model_output["""target_size"""] )[0] for index in outputs["scores"].nonzero(): _lowerCAmelCase =outputs["""scores"""][index].item() _lowerCAmelCase =self._get_bounding_box(outputs["""boxes"""][index][0] ) _lowerCAmelCase ={"""score""": score, """label""": label, """box""": box} results.append(__UpperCAmelCase ) _lowerCAmelCase =sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x["score"] , 
reverse=__UpperCAmelCase ) if top_k: _lowerCAmelCase =results[:top_k] return results def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Dict[str, int]: if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =box.int().tolist() _lowerCAmelCase ={ """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
341
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''cvt''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-12 , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) _lowerCAmelCase =num_channels _lowerCAmelCase =patch_sizes _lowerCAmelCase =patch_stride _lowerCAmelCase =patch_padding _lowerCAmelCase =embed_dim _lowerCAmelCase =num_heads _lowerCAmelCase =depth _lowerCAmelCase =mlp_ratio _lowerCAmelCase =attention_drop_rate _lowerCAmelCase =drop_rate _lowerCAmelCase =drop_path_rate _lowerCAmelCase =qkv_bias _lowerCAmelCase =cls_token _lowerCAmelCase =qkv_projection_method _lowerCAmelCase =kernel_qkv _lowerCAmelCase =padding_kv _lowerCAmelCase =stride_kv _lowerCAmelCase =padding_q _lowerCAmelCase =stride_q _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase = 1000 ) -> int: _lowerCAmelCase =2**power _lowerCAmelCase =0 while n: _lowerCAmelCase , _lowerCAmelCase =r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''image_processor''', '''tokenizer'''] lowerCamelCase = '''CLIPImageProcessor''' lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) _lowerCAmelCase =kwargs.pop("""feature_extractor""" ) _lowerCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _lowerCAmelCase =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer.model_input_names _lowerCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json', } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''nllb-moe''' lowerCamelCase = ['''past_key_values'''] lowerCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , __UpperCAmelCase=12_81_12 , __UpperCAmelCase=10_24 , __UpperCAmelCase=12 , __UpperCAmelCase=40_96 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=40_96 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0_5 , __UpperCAmelCase=0.0_5 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=10_24 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="float32" , __UpperCAmelCase=False , __UpperCAmelCase=1_28 , __UpperCAmelCase=64 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=0.0_0_1 , __UpperCAmelCase=0.0_0_1 , __UpperCAmelCase="all" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=1.0 , __UpperCAmelCase=0.2 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]: _lowerCAmelCase =vocab_size _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =d_model _lowerCAmelCase =encoder_ffn_dim _lowerCAmelCase =encoder_layers _lowerCAmelCase =encoder_attention_heads _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =activation_dropout _lowerCAmelCase =activation_function _lowerCAmelCase =init_std _lowerCAmelCase =encoder_layerdrop _lowerCAmelCase =decoder_layerdrop _lowerCAmelCase =use_cache _lowerCAmelCase =encoder_layers _lowerCAmelCase =scale_embedding # scale factor will be sqrt(d_model) if True _lowerCAmelCase =router_z_loss_coef _lowerCAmelCase =router_aux_loss_coef _lowerCAmelCase =decoder_sparse_step _lowerCAmelCase =encoder_sparse_step _lowerCAmelCase =num_experts _lowerCAmelCase =expert_capacity _lowerCAmelCase =router_bias if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' ) _lowerCAmelCase =router_dtype _lowerCAmelCase =router_ignore_padding_tokens _lowerCAmelCase =batch_prioritized_routing _lowerCAmelCase =second_expert_policy _lowerCAmelCase =normalize_router_prob_before_dropping _lowerCAmelCase =moe_eval_capacity_token_fraction _lowerCAmelCase =moe_token_dropout _lowerCAmelCase =output_router_logits super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __A = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['PerceiverFeatureExtractor'] __A = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py __A = 'src/diffusers' # Matches is_xxx_available() __A = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla __A = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') __A = '\n{0} = None\n' __A = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' __A = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def _lowerCamelCase(__UpperCamelCase ) -> List[str]: _lowerCAmelCase =_re_backend.findall(__UpperCamelCase ) if len(__UpperCamelCase ) == 0: return None return "_and_".join(__UpperCamelCase ) def _lowerCamelCase() -> int: with open(os.path.join(__UpperCamelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _lowerCAmelCase =f.readlines() # Get to the point we do the actual imports for type checking _lowerCAmelCase =0 _lowerCAmelCase ={} # Go through the end of the file while line_index < len(__UpperCamelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block _lowerCAmelCase =find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("""else:""" ): line_index += 1 line_index += 1 _lowerCAmelCase =[] # Until we unindent, add backend objects to the list while line_index < len(__UpperCamelCase ) and len(lines[line_index] ) > 1: _lowerCAmelCase =lines[line_index] _lowerCAmelCase =_re_single_line_import.search(__UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__UpperCamelCase ) > 0: _lowerCAmelCase =objects else: line_index += 1 return backend_specific_objects def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[int]: if name.isupper(): return DUMMY_CONSTANT.format(__UpperCamelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__UpperCamelCase , __UpperCamelCase ) else: return DUMMY_CLASS.format(__UpperCamelCase , __UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase=None ) -> str: if backend_specific_objects is None: _lowerCAmelCase =read_init() # For special correspondence backend to module name as used in the function requires_modulename _lowerCAmelCase ={} for backend, objects in backend_specific_objects.items(): _lowerCAmelCase ="""[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]""" _lowerCAmelCase ="""# This file is autogenerated by the command `make fix-copies`, do not edit.\n""" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__UpperCamelCase , __UpperCamelCase ) for o in objects] ) _lowerCAmelCase =dummy_file return dummy_files def _lowerCamelCase(__UpperCamelCase=False ) -> int: _lowerCAmelCase =create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py _lowerCAmelCase ={"""torch""": """pt"""} # Locate actual dummy modules and read their content. 
_lowerCAmelCase =os.path.join(__UpperCamelCase , """utils""" ) _lowerCAmelCase ={ backend: os.path.join(__UpperCamelCase , F'''dummy_{short_names.get(__UpperCamelCase , __UpperCamelCase )}_objects.py''' ) for backend in dummy_files.keys() } _lowerCAmelCase ={} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__UpperCamelCase ): with open(__UpperCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _lowerCAmelCase =f.read() else: _lowerCAmelCase ="""""" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F'''Updating diffusers.utils.dummy_{short_names.get(__UpperCamelCase , __UpperCamelCase )}_objects.py as the main ''' """__init__ has new objects.""" ) with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( """The main __init__ has objects that are not present in """ F'''diffusers.utils.dummy_{short_names.get(__UpperCamelCase , __UpperCamelCase )}_objects.py. Run `make fix-copies` ''' """to fix this.""" ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') __A = parser.parse_args() check_dummies(args.fix_and_overwrite)
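For illustration (editorial addition), this is roughly what the DUMMY_CLASS template above renders to for a single torch-only object, as it would appear in src/diffusers/utils/dummy_pt_objects.py; the class name is made up solely to show the shape of the generated file.

# Rendered output of DUMMY_CLASS.format("SomePipeline", '["torch"]') -- "SomePipeline"
# is a hypothetical name used only to illustrate the template.
from ..utils import DummyObject, requires_backends


class SomePipeline(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])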
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A = { 'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Swinv2ForImageClassification', 'Swinv2ForMaskedImageModeling', 'Swinv2Model', 'Swinv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __A = '\\n Text data.\n Second line of data.' __A = 'file' @pytest.fixture(scope="""session""" ) def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]: _lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""") _lowerCAmelCase =bytes(__UpperCamelCase , """utf-8""" ) with zstd.open(__UpperCamelCase , """wb""" ) as f: f.write(__UpperCamelCase ) return path @pytest.fixture def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: with open(os.path.join(tmpfs.local_root_dir , __UpperCamelCase ) , """w""" ) as f: f.write(__UpperCamelCase ) return FILE_PATH @pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: _lowerCAmelCase ={"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path} _lowerCAmelCase =input_paths[compression_format] _lowerCAmelCase =tmp_path / """cache""" _lowerCAmelCase =DownloadConfig(cache_dir=__UpperCamelCase , extract_compressed_file=__UpperCamelCase ) _lowerCAmelCase =cached_path(__UpperCamelCase , download_config=__UpperCamelCase ) with open(__UpperCamelCase ) as f: _lowerCAmelCase =f.read() with open(__UpperCamelCase ) as f: _lowerCAmelCase =f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("""default_extracted""" , [True, False] ) @pytest.mark.parametrize("""default_cache_dir""" , [True, False] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]: _lowerCAmelCase ="""custom_cache""" _lowerCAmelCase ="""custom_extracted_dir""" _lowerCAmelCase =tmp_path / """custom_extracted_path""" if default_extracted: _lowerCAmelCase =("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""") else: monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , __UpperCamelCase ) monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(__UpperCamelCase ) ) _lowerCAmelCase =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _lowerCAmelCase =xz_file _lowerCAmelCase =( DownloadConfig(extract_compressed_file=__UpperCamelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__UpperCamelCase ) ) _lowerCAmelCase =cached_path(__UpperCamelCase , download_config=__UpperCamelCase ) assert Path(__UpperCamelCase ).parent.parts[-2:] == expected def _lowerCamelCase(__UpperCamelCase ) -> Tuple: # absolute path _lowerCAmelCase =str(Path(__UpperCamelCase ).resolve() ) assert cached_path(__UpperCamelCase ) == text_file # relative path _lowerCAmelCase =str(Path(__UpperCamelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(__UpperCamelCase ) == text_file def _lowerCamelCase(__UpperCamelCase ) -> List[str]: # absolute path _lowerCAmelCase =str(tmp_path.resolve() / """__missing_file__.txt""" ) with pytest.raises(__UpperCamelCase ): cached_path(__UpperCamelCase ) # relative path _lowerCAmelCase ="""./__missing_file__.txt""" with pytest.raises(__UpperCamelCase ): 
cached_path(__UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase ) -> int: _lowerCAmelCase =get_from_cache(F'''tmp://{tmpfs_file}''' ) with open(__UpperCamelCase ) as f: _lowerCAmelCase =f.read() assert output_file_content == FILE_CONTENT @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCamelCase ) def _lowerCamelCase() -> List[str]: with pytest.raises(__UpperCamelCase ): cached_path("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase ) -> Optional[int]: _lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(__UpperCamelCase ): http_get("""https://huggingface.co""" , temp_file=__UpperCamelCase ) with pytest.raises(__UpperCamelCase ): http_head("""https://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase ) -> str: _lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(__UpperCamelCase ): ftp_get("""ftp://huggingface.co""" , temp_file=__UpperCamelCase ) with pytest.raises(__UpperCamelCase ): ftp_head("""ftp://huggingface.co""" ) @patch("""datasets.config.HF_DATASETS_OFFLINE""" , __UpperCamelCase ) def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =tmp_path_factory.mktemp("""data""" ) / """file.html""" with pytest.raises(__UpperCamelCase ): fsspec_get("""s3://huggingface.co""" , temp_file=__UpperCamelCase ) with pytest.raises(__UpperCamelCase ): fsspec_head("""s3://huggingface.co""" )
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=1 ) -> Tuple: if n_shave_prefix_segments >= 0: return ".".join(path.split(""".""" )[n_shave_prefix_segments:] ) else: return ".".join(path.split(""".""" )[:n_shave_prefix_segments] ) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> List[str]: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item.replace("""in_layers.0""" , """norm1""" ) _lowerCAmelCase =new_item.replace("""in_layers.2""" , """conv1""" ) _lowerCAmelCase =new_item.replace("""out_layers.0""" , """norm2""" ) _lowerCAmelCase =new_item.replace("""out_layers.3""" , """conv2""" ) _lowerCAmelCase =new_item.replace("""emb_layers.1""" , """time_emb_proj""" ) _lowerCAmelCase =new_item.replace("""skip_connection""" , """conv_shortcut""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=0 ) -> Tuple: _lowerCAmelCase =[] for old_item in old_list: _lowerCAmelCase =old_item _lowerCAmelCase =new_item.replace("""norm.weight""" , """group_norm.weight""" ) _lowerCAmelCase =new_item.replace("""norm.bias""" , """group_norm.bias""" ) _lowerCAmelCase =new_item.replace("""proj_out.weight""" , """proj_attn.weight""" ) _lowerCAmelCase =new_item.replace("""proj_out.bias""" , """proj_attn.bias""" ) _lowerCAmelCase =shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase ) mapping.append({"""old""": old_item, """new""": new_item} ) return mapping def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None ) -> Optional[int]: assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _lowerCAmelCase =old_checkpoint[path] _lowerCAmelCase =old_tensor.shape[0] // 3 _lowerCAmelCase =(-1, channels) if len(old_tensor.shape ) == 3 else (-1) _lowerCAmelCase =old_tensor.shape[0] // config["""num_head_channels"""] // 3 _lowerCAmelCase =old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =old_tensor.split(channels // num_heads , dim=1 ) _lowerCAmelCase =query.reshape(__UpperCamelCase ) _lowerCAmelCase =key.reshape(__UpperCamelCase ) _lowerCAmelCase =value.reshape(__UpperCamelCase ) for path in paths: _lowerCAmelCase =path["""new"""] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _lowerCAmelCase =new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" ) _lowerCAmelCase =new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" ) if additional_replacements is not None: for replacement in additional_replacements: _lowerCAmelCase =new_path.replace(replacement["""old"""] , replacement["""new"""] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _lowerCAmelCase =old_checkpoint[path["""old"""]][:, :, 0] else: _lowerCAmelCase =old_checkpoint[path["""old"""]] def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase ={} _lowerCAmelCase =checkpoint["""time_embed.0.weight"""] _lowerCAmelCase =checkpoint["""time_embed.0.bias"""] _lowerCAmelCase =checkpoint["""time_embed.2.weight"""] _lowerCAmelCase =checkpoint["""time_embed.2.bias"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.weight"""] _lowerCAmelCase =checkpoint["""input_blocks.0.0.bias"""] _lowerCAmelCase =checkpoint["""out.0.weight"""] _lowerCAmelCase =checkpoint["""out.0.bias"""] _lowerCAmelCase =checkpoint["""out.2.weight"""] _lowerCAmelCase =checkpoint["""out.2.bias"""] # Retrieves the keys for the input blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''input_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the middle blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''middle_block.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the output blocks only _lowerCAmelCase =len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} ) _lowerCAmelCase ={ layer_id: [key for key in checkpoint if F'''output_blocks.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(1 , __UpperCamelCase ): _lowerCAmelCase =(i - 1) // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =(i - 1) % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in input_blocks[i] if F'''input_blocks.{i}.1''' in key] if F'''input_blocks.{i}.0.op.weight''' in checkpoint: _lowerCAmelCase =checkpoint[ F'''input_blocks.{i}.0.op.weight''' ] _lowerCAmelCase =checkpoint[ 
F'''input_blocks.{i}.0.op.bias''' ] continue _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''input_blocks.{i}.0''', """new""": F'''down_blocks.{block_id}.resnets.{layer_in_block_id}'''} _lowerCAmelCase ={"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""} assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase ) if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''input_blocks.{i}.1''', """new""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''input_blocks.{i}.1.qkv.bias''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''input_blocks.{i}.1.qkv.weight''': { """key""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , ) _lowerCAmelCase =middle_blocks[0] _lowerCAmelCase =middle_blocks[1] _lowerCAmelCase =middle_blocks[2] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase ) _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """middle_block.1.qkv.bias""": { """key""": """mid_block.attentions.0.key.bias""", """query""": """mid_block.attentions.0.query.bias""", """value""": """mid_block.attentions.0.value.bias""", }, """middle_block.1.qkv.weight""": { """key""": """mid_block.attentions.0.key.weight""", """query""": """mid_block.attentions.0.query.weight""", """value""": """mid_block.attentions.0.value.weight""", }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase ) for i in range(__UpperCamelCase ): _lowerCAmelCase =i // (config["""num_res_blocks"""] + 1) _lowerCAmelCase =i % (config["""num_res_blocks"""] + 1) _lowerCAmelCase =[shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]] _lowerCAmelCase ={} for layer in output_block_layers: _lowerCAmelCase , _lowerCAmelCase =layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(__UpperCamelCase ) else: _lowerCAmelCase =[layer_name] if len(__UpperCamelCase ) > 1: _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.0''' in key] _lowerCAmelCase =[key for key in output_blocks[i] if F'''output_blocks.{i}.1''' in key] _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase ) _lowerCAmelCase ={"""old""": F'''output_blocks.{i}.0''', """new""": F'''up_blocks.{block_id}.resnets.{layer_in_block_id}'''} assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _lowerCAmelCase =list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] ) _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.weight''' ] _lowerCAmelCase =checkpoint[ F'''output_blocks.{i}.{index}.conv.bias''' ] # Clear attentions as they have been attributed above. if len(__UpperCamelCase ) == 2: _lowerCAmelCase =[] if len(__UpperCamelCase ): _lowerCAmelCase =renew_attention_paths(__UpperCamelCase ) _lowerCAmelCase ={ """old""": F'''output_blocks.{i}.1''', """new""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}''', } _lowerCAmelCase ={ F'''output_blocks.{i}.1.qkv.bias''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias''', }, F'''output_blocks.{i}.1.qkv.weight''': { """key""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight''', """query""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight''', """value""": F'''up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight''', }, } assign_to_checkpoint( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , ) else: _lowerCAmelCase =renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _lowerCAmelCase =""".""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] ) _lowerCAmelCase =""".""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] ) _lowerCAmelCase =checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __A = parser.parse_args() __A = torch.load(args.checkpoint_path) with open(args.config_file) as f: __A = json.loads(f.read()) __A = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __A = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __A = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) __A = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) __A = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { 'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json', 'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json', # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = '''xlm-roberta-xl''' def __init__( self , __UpperCAmelCase=25_08_80 , __UpperCAmelCase=25_60 , __UpperCAmelCase=36 , __UpperCAmelCase=32 , __UpperCAmelCase=1_02_40 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_14 , __UpperCAmelCase=1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1e-05 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) _lowerCAmelCase =vocab_size _lowerCAmelCase =hidden_size _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =hidden_act _lowerCAmelCase =intermediate_size _lowerCAmelCase =hidden_dropout_prob _lowerCAmelCase =attention_probs_dropout_prob _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =type_vocab_size _lowerCAmelCase =initializer_range _lowerCAmelCase =layer_norm_eps _lowerCAmelCase =position_embedding_type _lowerCAmelCase =use_cache _lowerCAmelCase =classifier_dropout class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' @property def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _lowerCAmelCase ={0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCAmelCase ={0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =0 _lowerCAmelCase =len(__UpperCamelCase ) for i in range(n - 1 ): for j in range(i + 1 , __UpperCamelCase ): if arr[i] > arr[j]: num_inversions += 1 return num_inversions def _lowerCamelCase(__UpperCamelCase ) -> List[Any]: if len(__UpperCamelCase ) <= 1: return arr, 0 _lowerCAmelCase =len(__UpperCamelCase ) // 2 _lowerCAmelCase =arr[0:mid] _lowerCAmelCase =arr[mid:] _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =_count_cross_inversions(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =inversion_p + inversions_q + cross_inversions return c, num_inversions def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Any: _lowerCAmelCase =[] _lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0 while i < len(__UpperCamelCase ) and j < len(__UpperCamelCase ): if p[i] > q[j]: # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P) # These are all inversions. The claim emerges from the # property that P is sorted. num_inversion += len(__UpperCamelCase ) - i r.append(q[j] ) j += 1 else: r.append(p[i] ) i += 1 if i < len(__UpperCamelCase ): r.extend(p[i:] ) else: r.extend(q[j:] ) return r, num_inversion def _lowerCamelCase() -> str: _lowerCAmelCase =[10, 2, 1, 5, 5, 2, 11] # this arr has 8 inversions: # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2) _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 8 print("""number of inversions = """ , __UpperCamelCase ) # testing an array with zero inversion (a sorted arr_1) arr_a.sort() _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) # an empty list should also have zero inversions _lowerCAmelCase =[] _lowerCAmelCase =count_inversions_bf(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =count_inversions_recursive(__UpperCamelCase ) assert num_inversions_bf == num_inversions_recursive == 0 print("""number of inversions = """ , __UpperCamelCase ) if __name__ == "__main__": main()
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCamelCase__ : '''simple docstring''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=99 , __UpperCAmelCase=13 , __UpperCAmelCase=16 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=30 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , __UpperCAmelCase=None , ) -> Any: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =decoder_seq_length # For common tests _lowerCAmelCase =self.decoder_seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_attention_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =d_model _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_layers _lowerCAmelCase =decoder_ffn_dim _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =decoder_attention_heads _lowerCAmelCase =eos_token_id _lowerCAmelCase =bos_token_id _lowerCAmelCase =pad_token_id _lowerCAmelCase =decoder_start_token_id _lowerCAmelCase =use_cache _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =None _lowerCAmelCase =decoder_seq_length _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =None if self.use_attention_mask: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _lowerCAmelCase =None if self.use_labels: _lowerCAmelCase =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _lowerCAmelCase =TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> List[Any]: _lowerCAmelCase =True _lowerCAmelCase =TrOCRDecoder(config=__UpperCAmelCase ).to(__UpperCAmelCase ).eval() _lowerCAmelCase =input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase ) _lowerCAmelCase =model(__UpperCAmelCase , use_cache=__UpperCAmelCase ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) ) self.parent.assertTrue(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) + 1 ) _lowerCAmelCase =outputs["""past_key_values"""] # create hypothetical next token and extent to next_input_ids _lowerCAmelCase =ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # 
append to next input_ids and _lowerCAmelCase =torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase =model(__UpperCAmelCase )["""last_hidden_state"""] _lowerCAmelCase =model(__UpperCAmelCase , past_key_values=__UpperCAmelCase )["""last_hidden_state"""] # select random slice _lowerCAmelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase =output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _lowerCAmelCase =output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs _lowerCAmelCase ={"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCamelCase = (TrOCRForCausalLM,) if is_torch_available() else () lowerCamelCase = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} lowerCamelCase = True lowerCamelCase = False def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =TrOCRStandaloneDecoderModelTester(self , is_training=__UpperCAmelCase ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> List[str]: pass def _lowerCAmelCase ( self ) -> List[Any]: pass def _lowerCAmelCase ( self ) -> Any: pass def _lowerCAmelCase ( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def _lowerCAmelCase ( self ) -> str: pass
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False lowerCamelCase = True lowerCamelCase = None lowerCamelCase = 1 lowerCamelCase = None lowerCamelCase = False lowerCamelCase = None lowerCamelCase = None def _lowerCAmelCase ( self ) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(__UpperCAmelCase ) for k, v in self.__dict__.items()} )
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=__magic_name__ ) class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) lowerCamelCase = Features({'''text''': Value('''string''' )} ) lowerCamelCase = Features({} ) lowerCamelCase = "text" @property def _lowerCAmelCase ( self ) -> Dict[str, str]: return {self.text_column: "text"}
"""simple docstring""" def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowerCamelCase() -> None: assert or_gate(0 , 0 ) == 0 assert or_gate(0 , 1 ) == 1 assert or_gate(1 , 0 ) == 1 assert or_gate(1 , 1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
"""simple docstring""" import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) __A = [ ['attention', 'attn'], ['encoder_attention', 'encoder_attn'], ['q_lin', 'q_proj'], ['k_lin', 'k_proj'], ['v_lin', 'v_proj'], ['out_lin', 'out_proj'], ['norm_embeddings', 'layernorm_embedding'], ['position_embeddings', 'embed_positions'], ['embeddings', 'embed_tokens'], ['ffn.lin', 'fc'], ] def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _lowerCAmelCase =k.replace(__UpperCamelCase , __UpperCamelCase ) if k.startswith("""encoder""" ): _lowerCAmelCase =k.replace(""".attn""" , """.self_attn""" ) _lowerCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) _lowerCAmelCase =k.replace("""norm2""" , """final_layer_norm""" ) elif k.startswith("""decoder""" ): _lowerCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" ) _lowerCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" ) _lowerCAmelCase =k.replace("""norm3""" , """final_layer_norm""" ) return k def _lowerCamelCase(__UpperCamelCase ) -> Union[str, Any]: _lowerCAmelCase =[ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: _lowerCAmelCase =sd.pop(__UpperCamelCase ) _lowerCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" ) assert new_k not in sd _lowerCAmelCase =v __A = ['START'] @torch.no_grad() def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[Any]: _lowerCAmelCase =torch.load(__UpperCamelCase , map_location="""cpu""" ) _lowerCAmelCase =model["""model"""] _lowerCAmelCase =BlenderbotConfig.from_json_file(__UpperCamelCase ) _lowerCAmelCase =BlenderbotForConditionalGeneration(__UpperCamelCase ) _lowerCAmelCase =m.model.state_dict().keys() _lowerCAmelCase =[] _lowerCAmelCase ={} for k, v in sd.items(): if k in IGNORE_KEYS: continue _lowerCAmelCase =rename_state_dict_key(__UpperCamelCase ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _lowerCAmelCase =v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(__UpperCamelCase ) m.model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase ) m.half() m.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin') parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.') parser.add_argument( '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use' ) __A = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' __A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' __A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=4 , __UpperCAmelCase=False ) -> Tuple: _lowerCAmelCase =compute_bleu( reference_corpus=__UpperCAmelCase , translation_corpus=__UpperCAmelCase , max_order=__UpperCAmelCase , smooth=__UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
"""simple docstring""" import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments __A = logging.getLogger(__name__) @dataclass class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = field( default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} ) lowerCamelCase = field(default=__magic_name__ , metadata={'''help''': '''Whether to SortishSamler or not.'''} ) lowerCamelCase = field( default=__magic_name__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) lowerCamelCase = field(default=__magic_name__ , metadata={'''help''': '''whether to use adafactor'''} ) lowerCamelCase = field( default=__magic_name__ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} ) lowerCamelCase = field( default=__magic_name__ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} ) lowerCamelCase = field(default=__magic_name__ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} ) lowerCamelCase = field( default=__magic_name__ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} ) lowerCamelCase = field( default='''linear''' , metadata={'''help''': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
"""simple docstring""" import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=512, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def _lowerCamelCase(__UpperCamelCase ) -> List[str]: if string == "True": return True elif string == "False": return False else: raise ValueError(F'''could not parse string as bool {string}''' ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) __A = parser.parse_args() __A = download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class lowerCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = StableUnCLIPPipeline lowerCamelCase = TEXT_TO_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false lowerCamelCase = False def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =32 _lowerCAmelCase =embedder_hidden_size # prior components torch.manual_seed(0 ) _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCAmelCase , projection_dim=__UpperCAmelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) _lowerCAmelCase =PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__UpperCAmelCase , num_layers=1 , ) torch.manual_seed(0 ) _lowerCAmelCase =DDPMScheduler( variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=10_00 , clip_sample=__UpperCAmelCase , clip_sample_range=5.0 , beta_schedule="""squaredcos_cap_v2""" , ) # regular denoising components torch.manual_seed(0 ) _lowerCAmelCase =StableUnCLIPImageNormalizer(embedding_dim=__UpperCAmelCase ) _lowerCAmelCase =DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=__UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__UpperCAmelCase , layers_per_block=1 , upcast_attention=__UpperCAmelCase , use_linear_projection=__UpperCAmelCase , ) torch.manual_seed(0 ) _lowerCAmelCase =DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , ) 
torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL() _lowerCAmelCase ={ # prior components """prior_tokenizer""": prior_tokenizer, """prior_text_encoder""": prior_text_encoder, """prior""": prior, """prior_scheduler""": prior_scheduler, # image noising components """image_normalizer""": image_normalizer, """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder, """unet""": unet, """scheduler""": scheduler, """vae""": vae, } return components def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> Dict: if str(__UpperCAmelCase ).startswith("""mps""" ): _lowerCAmelCase =torch.manual_seed(__UpperCAmelCase ) else: _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) _lowerCAmelCase ={ """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """prior_num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =torch_device == """cpu""" self._test_attention_slicing_forward_pass(test_max_difference=__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=__UpperCAmelCase ) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy""" ) _lowerCAmelCase =StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _lowerCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase =pipe("""anime turle""" , generator=__UpperCAmelCase , output_type="""np""" ) _lowerCAmelCase =output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase =StableUnCLIPPipeline.from_pretrained("""fusing/stable-unclip-2-1-l""" , torch_dtype=torch.floataa ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _lowerCAmelCase =pipe( """anime turtle""" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available __A = { 'configuration_audio_spectrogram_transformer': [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ASTConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ASTForAudioClassification', 'ASTModel', 'ASTPreTrainedModel', ] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = ['ASTFeatureExtractor'] if TYPE_CHECKING: from .configuration_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ASTConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ASTForAudioClassification, ASTModel, ASTPreTrainedModel, ) try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDModel( sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return model @property def _lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , ) _lowerCAmelCase =UNetaDModel( sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , ) return vqvae, unet @slow def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =AudioDiffusionPipeline(vqvae=__UpperCAmelCase , unet=self.dummy_unet , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 ) _lowerCAmelCase =pipe(generator=__UpperCAmelCase , steps=4 ) _lowerCAmelCase =output.audios[0] _lowerCAmelCase =output.images[0] _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 ) _lowerCAmelCase =pipe(generator=__UpperCAmelCase , steps=4 , return_dict=__UpperCAmelCase ) _lowerCAmelCase =output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) _lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _lowerCAmelCase =np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10] _lowerCAmelCase =np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 _lowerCAmelCase =Mel( 
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) _lowerCAmelCase =DDIMScheduler() _lowerCAmelCase =self.dummy_vqvae_and_unet _lowerCAmelCase =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) np.random.seed(0 ) _lowerCAmelCase =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 ) _lowerCAmelCase =pipe(raw_audio=__UpperCAmelCase , generator=__UpperCAmelCase , start_step=5 , steps=10 ) _lowerCAmelCase =output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) _lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _lowerCAmelCase =np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 _lowerCAmelCase =self.dummy_unet_condition _lowerCAmelCase =AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=__UpperCAmelCase , mel=__UpperCAmelCase , scheduler=__UpperCAmelCase ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) np.random.seed(0 ) _lowerCAmelCase =torch.rand((1, 1, 10) ) _lowerCAmelCase =pipe(generator=__UpperCAmelCase , encoding=__UpperCAmelCase ) _lowerCAmelCase =output.images[0] _lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _lowerCAmelCase =np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> List[Any]: _lowerCAmelCase =torch_device _lowerCAmelCase =DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" ) _lowerCAmelCase =pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(42 ) _lowerCAmelCase =pipe(generator=__UpperCAmelCase ) _lowerCAmelCase =output.audios[0] _lowerCAmelCase =output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] _lowerCAmelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10] _lowerCAmelCase =np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
341
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = { 'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'], 'tokenization_m2m_100': ['M2M100Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ 'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST', 'M2M100ForConditionalGeneration', 'M2M100Model', 'M2M100PreTrainedModel', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
1
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A = 16 __A = 32 def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase = 16 ) -> Optional[Any]: _lowerCAmelCase =AutoTokenizer.from_pretrained("""bert-base-cased""" ) _lowerCAmelCase =load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) _lowerCAmelCase =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__UpperCamelCase , max_length=__UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _lowerCAmelCase =datasets.map( __UpperCamelCase , batched=__UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCAmelCase =tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. _lowerCAmelCase =128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _lowerCAmelCase =16 elif accelerator.mixed_precision != "no": _lowerCAmelCase =8 else: _lowerCAmelCase =None return tokenizer.pad( __UpperCamelCase , padding="""longest""" , max_length=__UpperCamelCase , pad_to_multiple_of=__UpperCamelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
_lowerCAmelCase =DataLoader( tokenized_datasets["""train"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase ) _lowerCAmelCase =DataLoader( tokenized_datasets["""validation"""] , shuffle=__UpperCamelCase , collate_fn=__UpperCamelCase , batch_size=__UpperCamelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A = mocked_dataloaders # noqa: F811 def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> List[str]: # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , __UpperCamelCase ) == "1": _lowerCAmelCase =2 # New Code # _lowerCAmelCase =int(args.gradient_accumulation_steps ) # Initialize accelerator _lowerCAmelCase =Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__UpperCamelCase ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase =config["""lr"""] _lowerCAmelCase =int(config["""num_epochs"""] ) _lowerCAmelCase =int(config["""seed"""] ) _lowerCAmelCase =int(config["""batch_size"""] ) _lowerCAmelCase =evaluate.load("""glue""" , """mrpc""" ) set_seed(__UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =get_dataloaders(__UpperCamelCase , __UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCAmelCase =model.to(accelerator.device ) # Instantiate optimizer _lowerCAmelCase =AdamW(params=model.parameters() , lr=__UpperCamelCase ) # Instantiate scheduler _lowerCAmelCase =get_linear_schedule_with_warmup( optimizer=__UpperCamelCase , num_warmup_steps=100 , num_training_steps=(len(__UpperCamelCase ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =accelerator.prepare( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # Now we train the model for epoch in range(__UpperCamelCase ): model.train() for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(__UpperCamelCase ): _lowerCAmelCase =model(**__UpperCamelCase ) _lowerCAmelCase =output.loss accelerator.backward(__UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _lowerCAmelCase =model(**__UpperCamelCase ) _lowerCAmelCase =outputs.logits.argmax(dim=-1 ) _lowerCAmelCase , _lowerCAmelCase =accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__UpperCamelCase , references=__UpperCamelCase , ) _lowerCAmelCase =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , __UpperCamelCase ) def _lowerCamelCase() -> Dict: _lowerCAmelCase =argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__UpperCamelCase , default=__UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=__UpperCamelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) _lowerCAmelCase =parser.parse_args() _lowerCAmelCase ={"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": main()
341
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __A = datasets.logging.get_logger(__name__) __A = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n' __A = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. 
Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n' __A = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n' def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="dummy_doc" ) -> Dict: _lowerCAmelCase ={doc: key_lines} _lowerCAmelCase ={doc: sys_lines} _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase =0 _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase ) key_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase , _lowerCAmelCase =reader.get_doc_mentions(__UpperCamelCase , sys_doc_lines[doc] , __UpperCamelCase ) sys_singletons_num += singletons_num if NP_only or min_span: _lowerCAmelCase =reader.set_annotated_parse_trees(__UpperCamelCase , key_doc_lines[doc] , __UpperCamelCase , __UpperCamelCase ) if remove_nested: _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters _lowerCAmelCase , _lowerCAmelCase =reader.remove_nested_coref_mentions(__UpperCamelCase , __UpperCamelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =reader.get_mention_assignments(__UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' ) logger.info( """Number of resulting singleton clusters in the key """ F'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' ) if not keep_singletons: logger.info( F'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ''' 
"""files, respectively""" ) return doc_coref_infos def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: _lowerCAmelCase =get_coref_infos(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _lowerCAmelCase ={} _lowerCAmelCase =0 _lowerCAmelCase =0 for name, metric in metrics: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =evaluator.evaluate_documents(__UpperCamelCase , __UpperCamelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'''{name}/recall''': recall, F'''{name}/precision''': precision, F'''{name}/f1''': fa} ) logger.info( name.ljust(10 ) , F'''Recall: {recall * 100:.2f}''' , F''' Precision: {precision * 100:.2f}''' , F''' F1: {fa * 100:.2f}''' , ) if conll_subparts_num == 3: _lowerCAmelCase =(conll / 3) * 100 logger.info(F'''CoNLL score: {conll:.2f}''' ) output_scores.update({"""conll_score""": conll} ) return output_scores def _lowerCamelCase(__UpperCamelCase ) -> Tuple: _lowerCAmelCase =False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: _lowerCAmelCase =line.split()[5] if not parse_col == "-": _lowerCAmelCase =True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): '''simple docstring''' def _lowerCAmelCase ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False ) -> Optional[Any]: _lowerCAmelCase =[ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", evaluator.lea), ] if min_span: _lowerCAmelCase =util.check_gold_parse_annotation(__UpperCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" _lowerCAmelCase =evaluate( key_lines=__UpperCAmelCase , sys_lines=__UpperCAmelCase , metrics=__UpperCAmelCase , NP_only=__UpperCAmelCase , remove_nested=__UpperCAmelCase , keep_singletons=__UpperCAmelCase , min_span=__UpperCAmelCase , ) return score
341
1
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __A = logging.get_logger(__name__) def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase=False ) -> List[Any]: _lowerCAmelCase =OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith("""head""" ): _lowerCAmelCase ="""segformer.encoder.""" + key if key.startswith("""backbone""" ): _lowerCAmelCase =key.replace("""backbone""" , """segformer.encoder""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCAmelCase =key[key.find("""patch_embed""" ) + len("""patch_embed""" )] _lowerCAmelCase =key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(__UpperCamelCase )-1}''' ) if "norm" in key: _lowerCAmelCase =key.replace("""norm""" , """layer_norm""" ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCAmelCase =key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )] _lowerCAmelCase =key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(__UpperCamelCase )-1}''' ) if "layer_norm1" in key: _lowerCAmelCase =key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: _lowerCAmelCase =key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 _lowerCAmelCase =key[key.find("""block""" ) + len("""block""" )] _lowerCAmelCase =key.replace(F'''block{idx}''' , F'''block.{int(__UpperCamelCase )-1}''' ) if "attn.q" in key: _lowerCAmelCase =key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: _lowerCAmelCase =key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: _lowerCAmelCase =key.replace("""attn""" , """attention.self""" ) if "fc1" in key: _lowerCAmelCase =key.replace("""fc1""" , """dense1""" ) if "fc2" in key: _lowerCAmelCase =key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: _lowerCAmelCase =key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: _lowerCAmelCase =key.replace("""linear_fuse.conv""" , """linear_fuse""" ) _lowerCAmelCase =key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCAmelCase =key[key.find("""linear_c""" ) + len("""linear_c""" )] _lowerCAmelCase =key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(__UpperCamelCase )-1}''' ) if key.startswith("""head""" ): _lowerCAmelCase =key.replace("""head""" , """classifier""" ) _lowerCAmelCase =value return new_state_dict def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase ) -> Dict: # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCAmelCase =state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' ) _lowerCAmelCase =state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict _lowerCAmelCase =kv_weight[ : config.hidden_sizes[i], : ] _lowerCAmelCase =kv_bias[: 
config.hidden_sizes[i]] _lowerCAmelCase =kv_weight[ config.hidden_sizes[i] :, : ] _lowerCAmelCase =kv_bias[ config.hidden_sizes[i] : ] def _lowerCamelCase() -> List[Any]: _lowerCAmelCase ="""http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCAmelCase =Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return image @torch.no_grad() def _lowerCamelCase(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Dict: _lowerCAmelCase =SegformerConfig() _lowerCAmelCase =False # set attributes based on model_name _lowerCAmelCase ="""huggingface/label-files""" if "segformer" in model_name: _lowerCAmelCase =model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2] if "ade" in model_name: _lowerCAmelCase =150 _lowerCAmelCase ="""ade20k-id2label.json""" _lowerCAmelCase =(1, 150, 128, 128) elif "city" in model_name: _lowerCAmelCase =19 _lowerCAmelCase ="""cityscapes-id2label.json""" _lowerCAmelCase =(1, 19, 128, 128) else: raise ValueError(F'''Model {model_name} not supported''' ) elif "mit" in model_name: _lowerCAmelCase =True _lowerCAmelCase =model_name[4:6] _lowerCAmelCase =1000 _lowerCAmelCase ="""imagenet-1k-id2label.json""" _lowerCAmelCase =(1, 1000) else: raise ValueError(F'''Model {model_name} not supported''' ) # set config attributes _lowerCAmelCase =json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase ={int(__UpperCamelCase ): v for k, v in idalabel.items()} _lowerCAmelCase =idalabel _lowerCAmelCase ={v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": _lowerCAmelCase =[64, 128, 320, 512] _lowerCAmelCase =256 elif size == "b2": _lowerCAmelCase =[64, 128, 320, 512] _lowerCAmelCase =768 _lowerCAmelCase =[3, 4, 6, 3] elif size == "b3": _lowerCAmelCase =[64, 128, 320, 512] _lowerCAmelCase =768 _lowerCAmelCase =[3, 4, 18, 3] elif size == "b4": _lowerCAmelCase =[64, 128, 320, 512] _lowerCAmelCase =768 _lowerCAmelCase =[3, 8, 27, 3] elif size == "b5": _lowerCAmelCase =[64, 128, 320, 512] _lowerCAmelCase =768 _lowerCAmelCase =[3, 6, 40, 3] else: raise ValueError(F'''Size {size} not supported''' ) # load image processor (only resize + normalize) _lowerCAmelCase =SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=__UpperCamelCase , align=__UpperCamelCase , do_random_crop=__UpperCamelCase ) # prepare image _lowerCAmelCase =prepare_img() _lowerCAmelCase =image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).pixel_values logger.info(F'''Converting model {model_name}...''' ) # load original state dict if encoder_only: _lowerCAmelCase =torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) ) else: _lowerCAmelCase =torch.load(__UpperCamelCase , map_location=torch.device("""cpu""" ) )["""state_dict"""] # rename keys _lowerCAmelCase =rename_keys(__UpperCamelCase , encoder_only=__UpperCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(__UpperCamelCase , __UpperCamelCase ) # create HuggingFace model and load state dict if encoder_only: _lowerCAmelCase =False _lowerCAmelCase =SegformerForImageClassification(__UpperCamelCase ) else: _lowerCAmelCase =SegformerForSemanticSegmentation(__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) model.eval() # forward pass _lowerCAmelCase =model(__UpperCamelCase ) _lowerCAmelCase =outputs.logits # set expected_slice based on model name # ADE20k checkpoints if 
model_name == "segformer.b0.512x512.ade.160k": _lowerCAmelCase =torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": _lowerCAmelCase =torch.tensor( [ [[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]], [[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]], [[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": _lowerCAmelCase =torch.tensor( [ [[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]], [[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]], [[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": _lowerCAmelCase =torch.tensor( [ [[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]], [[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]], [[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": _lowerCAmelCase =torch.tensor( [ [[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]], [[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]], [[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": _lowerCAmelCase =torch.tensor( [ [[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]], [[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]], [[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": _lowerCAmelCase =torch.tensor( [ [[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]], [[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]], [[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": _lowerCAmelCase =torch.tensor( [ [[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]], [[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]], [[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": _lowerCAmelCase =torch.tensor( [ [ [-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1], [-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1], [-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1], ], [ [-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1], 
[-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1], [-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1], ], [ [7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2], [4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1], [3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": _lowerCAmelCase =torch.tensor( [ [[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]], [[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]], [[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": _lowerCAmelCase =torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": _lowerCAmelCase =torch.tensor( [ [[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]], [[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]], [[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": _lowerCAmelCase =torch.tensor( [ [[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]], [[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]], [[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": _lowerCAmelCase =torch.tensor( [ [[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]], [[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]], [[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": _lowerCAmelCase =torch.tensor( [ [[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]], [[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]], [[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]], ] ) else: _lowerCAmelCase =logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , __UpperCamelCase , atol=1E-2 ) # finally, save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( '--model_name', default='segformer.b0.512x512.ade.160k', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint 
(.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) args = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
341
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowerCamelCase__ : '''simple docstring''' lowerCamelCase = XGLMConfig lowerCamelCase = {} lowerCamelCase = '''gelu''' def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=0.0_2 , ) -> List[str]: _lowerCAmelCase =parent _lowerCAmelCase =batch_size _lowerCAmelCase =seq_length _lowerCAmelCase =is_training _lowerCAmelCase =use_input_mask _lowerCAmelCase =use_labels _lowerCAmelCase =vocab_size _lowerCAmelCase =d_model _lowerCAmelCase =num_hidden_layers _lowerCAmelCase =num_attention_heads _lowerCAmelCase =ffn_dim _lowerCAmelCase =activation_function _lowerCAmelCase =activation_dropout _lowerCAmelCase =attention_dropout _lowerCAmelCase =max_position_embeddings _lowerCAmelCase =initializer_range _lowerCAmelCase =None _lowerCAmelCase =0 _lowerCAmelCase =2 _lowerCAmelCase =1 def _lowerCAmelCase ( self ) -> Dict: return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def _lowerCAmelCase ( self ) -> str: _lowerCAmelCase =tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase =None if self.use_input_mask: _lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase =self.get_config() _lowerCAmelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def _lowerCAmelCase ( self ) -> str: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCAmelCase , ) def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) =config_and_inputs _lowerCAmelCase ={ """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowerCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else () lowerCamelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': 
TFXGLMForCausalLM} if is_tf_available() else {} ) lowerCamelCase = False lowerCamelCase = False lowerCamelCase = False def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =TFXGLMModelTester(self ) _lowerCAmelCase =ConfigTester(self , config_class=__UpperCAmelCase , n_embd=37 ) def _lowerCAmelCase ( self ) -> int: self.config_tester.run_common_tests() @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase =TFXGLMModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def _lowerCAmelCase ( self ) -> Union[str, Any]: super().test_resize_token_embeddings() @require_tf class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCAmelCase ( self , __UpperCAmelCase=True ) -> str: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase =[2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81] # fmt: on _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase =tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase =tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase =model.generate(__UpperCAmelCase , do_sample=__UpperCAmelCase , seed=[7, 0] ) _lowerCAmelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase =XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase ="""left""" # use different length sentences to test batching _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCAmelCase =tokenizer(__UpperCAmelCase , return_tensors="""tf""" , padding=__UpperCAmelCase ) _lowerCAmelCase =inputs["""input_ids"""] _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase =model.generate(input_ids=__UpperCAmelCase , max_new_tokens=12 ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCAmelCase ) _lowerCAmelCase =[ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , [non_padded_sentence, padded_sentence] )
341
1